diff --git a/.editorconfig b/.editorconfig index 321808ebaecf..103fe51237c8 100644 --- a/.editorconfig +++ b/.editorconfig @@ -16,6 +16,14 @@ profile = black indent_style = space indent_size = 2 +[*.md] +indent_style = space +indent_size = 2 + [*.yml] indent_style = space indent_size = 2 + +[*.toml] +indent_style = space +indent_size = 4 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5270bf89ae33..7e0910c449e9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,10 @@ README.md @jafermarq @tanertopal @danieljanes # Flower Baselines -/baselines @jafermarq @tanertopal @danieljanes +/baselines @jafermarq @danieljanes + +# Flower Benchmarks +/benchmarks @jafermarq @danieljanes # Flower Datasets /datasets @jafermarq @tanertopal @danieljanes @@ -27,3 +30,9 @@ README.md @jafermarq @tanertopal @danieljanes # GitHub Actions and Workflows /.github/workflows @Robert-Steiner @tanertopal @danieljanes /.github/actions @Robert-Steiner @tanertopal @danieljanes + +# Docker-related files +/.devcontainer @Robert-Steiner @Moep90 @tanertopal @danieljanes +**/Dockerfile @Robert-Steiner @Moep90 @tanertopal @danieljanes +**/*.Dockerfile @Robert-Steiner @Moep90 @tanertopal @danieljanes +/src/docker @Robert-Steiner @Moep90 @tanertopal @danieljanes diff --git a/.github/actions/bootstrap/action.yml b/.github/actions/bootstrap/action.yml index 4cde8dddfa3f..a8a98acdf304 100644 --- a/.github/actions/bootstrap/action.yml +++ b/.github/actions/bootstrap/action.yml @@ -3,7 +3,7 @@ description: "Bootstrap Python environment (install and configure Python version inputs: python-version: description: "Version range or exact version of Python or PyPy to use, using SemVer's version range syntax." - default: 3.8 + default: 3.9 pip-version: description: "Version of pip to be installed using pip" default: 24.1.2 diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index a3373c6e93fa..227b0d7482ae 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -36,7 +36,7 @@ permissions: jobs: build: name: Build image - runs-on: ubuntu-22.04 + runs-on: ${{ matrix.platform.runner-os }} timeout-minutes: 180 outputs: build-id: ${{ steps.build-id.outputs.id }} @@ -44,10 +44,8 @@ jobs: fail-fast: true matrix: platform: [ - # build-push action and qemu use different platform names - # therefore we create a map - { name: "amd64", qemu: "", docker: "linux/amd64" }, - { name: "arm64", qemu: "arm64", docker: "linux/arm64" }, + { name: "amd64", docker: "linux/amd64", runner-os: "ubuntu-22.04" }, + { name: "arm64", docker: "linux/arm64", runner-os: "ubuntu-4-core-arm64" }, ] steps: - name: Create build id @@ -79,12 +77,6 @@ jobs: print(build_args, file=fh) print("EOF", file=fh) - - name: Set up QEMU - if: matrix.platform.qemu != '' - uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - with: - platforms: ${{ matrix.platform.qemu }} - - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 @@ -104,7 +96,7 @@ jobs: uses: Wandalen/wretry.action@6feedb7dedadeb826de0f45ff482b53b379a7844 # v3.5.0 id: build with: - action: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 + action: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 attempt_limit: 60 # 60 attempts * (9 secs delay + 1 sec retry) = ~10 mins attempt_delay: 9000 # 9 secs with: | @@ -122,7 +114,7 @@ jobs: touch 
"/tmp/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 with: name: digests-${{ steps.build-id.outputs.id }}-${{ matrix.platform.name }} path: /tmp/digests/* diff --git a/.github/workflows/cache-cleanup.yml b/.github/workflows/cache-cleanup.yml index aa2da65d0a47..dca5505f7bf6 100644 --- a/.github/workflows/cache-cleanup.yml +++ b/.github/workflows/cache-cleanup.yml @@ -34,7 +34,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 - name: Cleanup caches by directories # Only keep caches that match the latest keys for each directory diff --git a/.github/workflows/datasets-e2e.yml b/.github/workflows/datasets-e2e.yml index 2a73a8538b14..dbd90635c74e 100644 --- a/.github/workflows/datasets-e2e.yml +++ b/.github/workflows/datasets-e2e.yml @@ -45,7 +45,7 @@ jobs: - name: Bootstrap uses: ./.github/actions/bootstrap with: - python-version: 3.8 + python-version: 3.9 - name: Install dependencies run: python -m poetry install - name: Run tests diff --git a/.github/workflows/datasets.yml b/.github/workflows/datasets.yml index ca5aa29248cf..860d944696f9 100644 --- a/.github/workflows/datasets.yml +++ b/.github/workflows/datasets.yml @@ -37,7 +37,7 @@ jobs: # In case of a mismatch, the job has to download Python to install it. # Note: Due to a bug in actions/setup-python, we have to put "3.10" in # quotes as it will otherwise assume "3.1" - python: [3.8, 3.9, '3.10', '3.11'] + python: ['3.9', '3.10', '3.11'] name: Python ${{ matrix.python }} diff --git a/.github/workflows/docker-build-main.yml b/.github/workflows/docker-build-main.yml new file mode 100644 index 000000000000..81ef845eae29 --- /dev/null +++ b/.github/workflows/docker-build-main.yml @@ -0,0 +1,69 @@ +name: Build Docker Images Main Branch + +on: + push: + branches: + - 'main' + +jobs: + parameters: + if: github.repository == 'adap/flower' + name: Collect docker build parameters + runs-on: ubuntu-22.04 + timeout-minutes: 10 + outputs: + pip-version: ${{ steps.versions.outputs.pip-version }} + setuptools-version: ${{ steps.versions.outputs.setuptools-version }} + flwr-version-ref: ${{ steps.versions.outputs.flwr-version-ref }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: ./.github/actions/bootstrap + id: bootstrap + + - id: versions + run: | + echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" + echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" + echo "flwr-version-ref=git+${{ github.server_url }}/${{ github.repository }}.git@${{ github.sha }}" >> "$GITHUB_OUTPUT" + + build-docker-base-images: + name: Build base images + if: github.repository == 'adap/flower' + uses: ./.github/workflows/_docker-build.yml + needs: parameters + with: + namespace-repository: flwr/base + file-dir: src/docker/base/ubuntu + build-args: | + PIP_VERSION=${{ needs.parameters.outputs.pip-version }} + SETUPTOOLS_VERSION=${{ needs.parameters.outputs.setuptools-version }} + FLWR_VERSION_REF=${{ needs.parameters.outputs.flwr-version-ref }} + tags: unstable + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + + build-docker-binary-images: + name: Build binary images + if: github.repository == 'adap/flower' + uses: ./.github/workflows/_docker-build.yml + needs: 
build-docker-base-images + strategy: + fail-fast: false + matrix: + images: [ + { repository: "flwr/superlink", file_dir: "src/docker/superlink" }, + { repository: "flwr/supernode", file_dir: "src/docker/supernode" }, + { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" }, + { repository: "flwr/superexec", file_dir: "src/docker/superexec" }, + { repository: "flwr/clientapp", file_dir: "src/docker/clientapp" } + ] + with: + namespace-repository: ${{ matrix.images.repository }} + file-dir: ${{ matrix.images.file_dir }} + build-args: BASE_IMAGE=unstable + tags: unstable + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-readme.yml b/.github/workflows/docker-readme.yml new file mode 100644 index 000000000000..29dd787d638e --- /dev/null +++ b/.github/workflows/docker-readme.yml @@ -0,0 +1,51 @@ +name: Update Docker READMEs + +on: + push: + branches: + - 'main' + paths: + - 'src/docker/**/README.md' + +jobs: + collect: + if: ${{ github.repository == 'adap/flower' }} + name: Collect Docker READMEs + runs-on: ubuntu-22.04 + timeout-minutes: 10 + outputs: + readme_files: ${{ steps.filter.outputs.readme_files }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 + id: filter + with: + list-files: "json" + filters: | + readme: + - 'src/docker/**/README.md' + + update: + if: ${{ needs.collect.outputs.readme_files != '' && toJson(fromJson(needs.collect.outputs.readme_files)) != '[]' }} + name: Update Docker READMEs + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: collect + strategy: + matrix: + readme_path: ${{ fromJSON(needs.collect.outputs.readme_files) }} + + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - id: repository + run: echo "name=$(basename $(dirname ${{ matrix.readme_path }}))" >> "$GITHUB_OUTPUT" + + - name: Docker Hub Description + uses: peter-evans/dockerhub-description@e98e4d1628a5f3be2be7c231e50981aee98723ae # v4.0.0 + with: + repository: flwr/${{ steps.repository.outputs.name }} + readme-filepath: ${{ matrix.readme_path }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 49e5b7bf1b36..aba3726017fd 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -127,7 +127,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 - name: Install build tools run: | python -m pip install -U pip==23.3.1 @@ -146,8 +146,6 @@ jobs: if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} run: | python -m pip install https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }} - - name: Install e2e components - run: pip install . 
- name: Download dataset if: ${{ matrix.dataset }} run: python -c "${{ matrix.dataset }}" @@ -172,7 +170,7 @@ jobs: run: ./../test_superlink.sh bare sqlite - name: Run driver test with client authentication if: ${{ matrix.directory == 'e2e-bare-auth' }} - run: ./../test_superlink.sh bare client-auth + run: ./../test_superlink.sh "${{ matrix.directory }}" client-auth - name: Run reconnection test with SQLite database if: ${{ matrix.directory == 'e2e-bare' }} run: ./../test_reconnection.sh sqlite diff --git a/.github/workflows/framework-release.yml b/.github/workflows/framework-release.yml index 812d5b1e398e..e608329872de 100644 --- a/.github/workflows/framework-release.yml +++ b/.github/workflows/framework-release.yml @@ -16,6 +16,8 @@ jobs: if: ${{ github.repository == 'adap/flower' }} name: Publish release runs-on: ubuntu-22.04 + outputs: + flwr-version: ${{ steps.publish.outputs.flwr-version }} steps: - name: Checkout code uses: actions/checkout@v4 @@ -26,10 +28,12 @@ jobs: uses: ./.github/actions/bootstrap - name: Get artifacts and publish + id: publish env: GITHUB_REF: ${{ github.ref }} run: | TAG_NAME=$(echo "${GITHUB_REF_NAME}" | cut -c2-) + echo "flwr-version=$TAG_NAME" >> "$GITHUB_OUTPUT" wheel_name="flwr-${TAG_NAME}-py3-none-any.whl" tar_name="flwr-${TAG_NAME}.tar.gz" @@ -67,8 +71,7 @@ jobs: - id: matrix run: | - FLWR_VERSION=$(poetry version -s) - python dev/build-docker-image-matrix.py --flwr-version "${FLWR_VERSION}" > matrix.json + python dev/build-docker-image-matrix.py --flwr-version "${{ needs.publish.outputs.flwr-version }}" > matrix.json echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT build-base-images: diff --git a/.github/workflows/framework.yml b/.github/workflows/framework.yml index a5d2b71f7beb..a8ff69204b58 100644 --- a/.github/workflows/framework.yml +++ b/.github/workflows/framework.yml @@ -25,7 +25,7 @@ jobs: # In case of a mismatch, the job has to download Python to install it. 
# Note: Due to a bug in actions/setup-python, we have to put "3.10" in # quotes as it will otherwise assume "3.1" - python: [3.8, 3.9, '3.10', '3.11'] + python: ['3.9', '3.10', '3.11'] name: Python ${{ matrix.python }} diff --git a/.github/workflows/update_translations.yml b/.github/workflows/update_translations.yml new file mode 100644 index 000000000000..9419f4aaef25 --- /dev/null +++ b/.github/workflows/update_translations.yml @@ -0,0 +1,79 @@ +name: Translations + +on: + schedule: + - cron: '0 0 * * *' # Runs every day at midnight + workflow_dispatch: # Allows to manually trigger the workflow + +jobs: + update-and-pr: + runs-on: ubuntu-22.04 + permissions: + contents: write + pull-requests: write + env: + branch-name: auto-update-trans-text + name: Update text + steps: + - uses: actions/checkout@v4 + + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m poetry install + pip install sphinx==7.3.7 + + - name: Install pandoc + uses: nikeee/setup-pandoc@v1 + + - name: Update text and translations for all locales + run: | + cd doc + make update-text + for langDir in locales/*; do + if [ -d "$langDir" ]; then + lang=$(basename $langDir) + echo "Updating language $lang" + make update-lang lang=$lang + fi + done + + - name: Commit changes + run: | + git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + git add doc/locales + git commit -m "Update text and language files" + continue-on-error: true + + - name: Calculate diff # Even without doc changes the update-lang command will generate 228 additions and 60 deletions, so we only want to open a PR when there is more + id: calculate_diff + run: | + additions=$(git diff --numstat HEAD^1 | awk '{s+=$1} END {print s}') + deletions=$(git diff --numstat HEAD^1 | awk '{s+=$2} END {print s}') + echo "Additions: $additions" + echo "Deletions: $deletions" + echo "additions=$additions" >> $GITHUB_OUTPUT + echo "deletions=$deletions" >> $GITHUB_OUTPUT + + - name: Push changes + if: steps.calculate_diff.outputs.additions > 228 && steps.calculate_diff.outputs.deletions > 60 + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: '${{ env.branch-name }}' + + - name: Create Pull Request + if: steps.calculate_diff.outputs.additions > 228 && steps.calculate_diff.outputs.deletions > 60 + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + branch: '${{ env.branch-name }}' + delete-branch: true + title: 'docs(framework:skip) Update source texts for translations (automated)' + body: 'This PR is auto-generated to update text and language files.' 
+ draft: false diff --git a/README.md b/README.md index 3f1d96ca53c0..9f2604ad37b0 100644 --- a/README.md +++ b/README.md @@ -143,11 +143,10 @@ Other [examples](https://github.com/adap/flower/tree/main/examples): - [PyTorch: From Centralized to Federated](https://github.com/adap/flower/tree/main/examples/pytorch-from-centralized-to-federated) - [Vertical FL](https://github.com/adap/flower/tree/main/examples/vertical-fl) - [Federated Finetuning of OpenAI's Whisper](https://github.com/adap/flower/tree/main/examples/whisper-federated-finetuning) -- [Federated Finetuning of Large Language Model](https://github.com/adap/flower/tree/main/examples/llm-flowertune) +- [Federated Finetuning of Large Language Model](https://github.com/adap/flower/tree/main/examples/flowertune-llm) - [Federated Finetuning of a Vision Transformer](https://github.com/adap/flower/tree/main/examples/flowertune-vit) - [Advanced Flower with TensorFlow/Keras](https://github.com/adap/flower/tree/main/examples/advanced-tensorflow) - [Advanced Flower with PyTorch](https://github.com/adap/flower/tree/main/examples/advanced-pytorch) -- Single-Machine Simulation of Federated Learning Systems ([PyTorch](https://github.com/adap/flower/tree/main/examples/simulation-pytorch)) ([Tensorflow](https://github.com/adap/flower/tree/main/examples/simulation-tensorflow)) - [Comprehensive Flower+XGBoost](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive) - [Flower through Docker Compose and with Grafana dashboard](https://github.com/adap/flower/tree/main/examples/flower-via-docker-compose) - [Flower with KaplanMeierFitter from the lifelines library](https://github.com/adap/flower/tree/main/examples/federated-kaplan-meier-fitter) diff --git a/baselines/README.md b/baselines/README.md index 3a84df02d8de..75bcccb68b2a 100644 --- a/baselines/README.md +++ b/baselines/README.md @@ -1,10 +1,9 @@ # Flower Baselines +> [!NOTE] > We are changing the way we structure the Flower baselines. While we complete the transition to the new format, you can still find the existing baselines in the `flwr_baselines` directory. Currently, you can make use of baselines for [FedAvg](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/fedavg_mnist), [FedOpt](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/adaptive_federated_optimization), and [LEAF-FEMNIST](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/leaf/femnist). -> The documentation below has been updated to reflect the new way of using Flower baselines. - ## Structure @@ -15,17 +14,15 @@ baselines// ├── README.md ├── pyproject.toml └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files + └── *.py # several .py files ``` -Please note that some baselines might include additional files (e.g. a `requirements.txt`) or a hierarchy of `.yaml` files for [Hydra](https://hydra.cc/). ## Running the baselines -Each baseline is self-contained in its own directory. Furthermore, each baseline defines its own Python environment using [Poetry](https://python-poetry.org/docs/) via a `pyproject.toml` file and [`pyenv`](https://github.com/pyenv/pyenv). If you haven't setup `Poetry` and `pyenv` already on your machine, please take a look at the [Documentation](https://flower.ai/docs/baselines/how-to-use-baselines.html#setting-up-your-machine) for a guide on how to do so. 
+> [!NOTE] +> We are in the process of migrating all baselines to use `flwr run`. Those baselines that remain using the previous system (i.e. using [Poetry](https://python-poetry.org/), [Hydra](https://hydra.cc/) and [start_simulation](https://flower.ai/docs/framework/ref-api/flwr.simulation.start_simulation.html)) might require you to first setup `Poetry` and `pyenv` already on your machine, please take a look at the [Documentation](https://flower.ai/docs/baselines/how-to-use-baselines.html#setting-up-your-machine) for a guide on how to do so. -Assuming `pyenv` and `Poetry` are already installed on your system. Running a baseline can be done by: +Each baseline is self-contained in its own directory. To run a baseline: 1. Cloning the flower repository @@ -34,11 +31,7 @@ Assuming `pyenv` and `Poetry` are already installed on your system. Running a ba ``` 2. Navigate inside the directory of the baseline you'd like to run. -3. Follow the `[Environment Setup]` instructions in the `README.md`. In most cases this will require you to just do: - - ```bash - poetry install - ``` +3. Follow the `[Environment Setup]` instructions in the `README.md`. 4. Run the baseline as indicated in the `[Running the Experiments]` section in the `README.md` or in the `[Expected Results]` section to reproduce the experiments in the paper. @@ -46,17 +39,22 @@ Assuming `pyenv` and `Poetry` are already installed on your system. Running a ba Do you have a new federated learning paper and want to add a new baseline to Flower? Or do you want to add an experiment to an existing baseline paper? Great, we really appreciate your contribution !! +> [!TIP] +> A more verbose version of these steps can be found in the [Flower Baselines documentation](https://flower.ai/docs/baselines/how-to-contribute-baselines.html). + The steps to follow are: +1. Create a new Python 3.10 environment and install Flower (`pip install flwr`) 1. Fork the Flower repo and clone it into your machine. -2. Navigate to the `baselines/` directory, choose a single-word (and **lowercase**) name for your baseline, and from there run: +2. Navigate to the `baselines/` directory, from there and with your environment activated, run: ```bash - # This will create a new directory with the same structure as `baseline_template`. - ./dev/create-baseline.sh + # Choose option "Flower Baseline" when prompted + flwr new ``` -3. Then, go inside your baseline directory and continue with the steps detailed in `EXTENDED_README.md` and `README.md`. -4. Once your code is ready and you have checked that following the instructions in your `README.md` the Python environment can be created correctly and that running the code following your instructions can reproduce the experiments in the paper, you just need to create a Pull Request (PR). Then, the process to merge your baseline into the Flower repo will begin! +3. Then, go inside your baseline directory and continue with the steps detailed in the `README.md`. +4. Once your code is ready, check that you have completed all the sections in the `README.md` and that, if a new environment is created, your baseline still runs (i.e. play the role of a person running the baseline you want to contribute). +5. Create a Pull Request (PR). Then, the process to merge your baseline into the Flower repo will begin! 
Further resources: diff --git a/baselines/baseline_template/EXTENDED_README.md b/baselines/baseline_template/EXTENDED_README.md deleted file mode 100644 index 9c8f5bc72fa9..000000000000 --- a/baselines/baseline_template/EXTENDED_README.md +++ /dev/null @@ -1,123 +0,0 @@ - -# Extended Readme - -> The baselines are expected to run in a machine running Ubuntu 22.04 - -While `README.md` should include information about the baseline you implement and how to run it, this _extended_ readme provides info on what's the expected directory structure for a new baseline and more generally the instructions to follow before your baseline can be merged into the Flower repository. Please follow closely these instructions. It is likely that you have already completed steps 1-2. - -1. Fork the Flower repository and clone it. -2. Navigate to the `baselines/` directory and from there run: - ```bash - # This will create a new directory with the same structure as this `baseline_template` directory. - ./dev/create-baseline.sh - ``` -3. All your code and configs should go into a sub-directory with the same name as the name of your baseline. - * The sub-directory contains a series of Python scripts that you can edit. Please stick to these files and consult with us if you need additional ones. - * There is also a basic config structure in `/conf` ready be parsed by [Hydra](https://hydra.cc/) when executing your `main.py`. -4. Therefore, the directory structure in your baseline should look like: - ```bash - baselines/ - ├── README.md # describes your baseline and everything needed to use it - ├── EXTENDED_README.md # to remove before creating your PR - ├── pyproject.toml # details your Python environment - └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files - - ``` -> :warning: Make sure the variable `name` in `pyproject.toml` is set to the name of the sub-directory containing all your code. - -5. Add your dependencies to the `pyproject.toml` (see below a few examples on how to do it). Read more about Poetry below in this `EXTENDED_README.md`. -6. Regularly check that your coding style and the documentation you add follow good coding practices. To test whether your code meets the requirements, please run the following: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/test-baseline.sh - ./dev/test-baseline-structure.sh - ``` - Both `test-baseline.sh` and `test-baseline-structure.sh` will also be automatically run when you create a PR, and both tests need to pass for the baseline to be merged. - To automatically solve some formatting issues and apply easy fixes, please run the formatting script: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/format-baseline.sh - ``` -7. Ensure that the Python environment for your baseline can be created without errors by simply running `poetry install` and that this is properly described later when you complete the `Environment Setup` section in `README.md`. This is specially important if your environment requires additional steps after doing `poetry install`. -8. Ensure that your baseline runs with default arguments by running `poetry run python -m .main`. Then, describe this and other forms of running your code in the `Running the Experiments` section in `README.md`. -9. 
Once your code is ready and you have checked: - * that following the instructions in your `README.md` the Python environment can be created correctly - - * that running the code following your instructions can reproduce the experiments in the paper - - , then you just need to create a Pull Request (PR) to kickstart the process of merging your baseline into the Flower repository. - -> Once you are happy to merge your baseline contribution, please delete this `EXTENDED_README.md` file. - - -## About Poetry - -We use Poetry to manage the Python environment for each individual baseline. You can follow the instructions [here](https://python-poetry.org/docs/) to install Poetry in your machine. - - -### Specifying a Python Version (optional) -By default, Poetry will use the Python version in your system. In some settings, you might want to specify a particular version of Python to use inside your Poetry environment. You can do so with [`pyenv`](https://github.com/pyenv/pyenv). Check the documentation for the different ways of installing `pyenv`, but one easy way is using the [automatic installer](https://github.com/pyenv/pyenv-installer): -```bash -curl https://pyenv.run | bash # then, don't forget links to your .bashrc/.zshrc -``` - -You can then install any Python version with `pyenv install ` (e.g. `pyenv install 3.9.17`). Then, in order to use that version for your baseline, you'd do the following: - -```bash -# cd to your baseline directory (i.e. where the `pyproject.toml` is) -pyenv local - -# set that version for poetry -poetry env use - -# then you can install your Poetry environment (see the next setp) -``` - -### Installing Your Environment -With the Poetry tool already installed, you can create an environment for this baseline with commands: -```bash -# run this from the same directory as the `pyproject.toml` file is -poetry install -``` - -This will create a basic Python environment with just Flower and additional packages, including those needed for simulation. Next, you should add the dependencies for your code. It is **critical** that you fix the version of the packages you use using a `=` not a `=^`. You can do so via [`poetry add`](https://python-poetry.org/docs/cli/#add). Below are some examples: - -```bash -# For instance, if you want to install tqdm -poetry add tqdm==4.65.0 - -# If you already have a requirements.txt, you can add all those packages (but ensure you have fixed the version) in one go as follows: -poetry add $( cat requirements.txt ) -``` -With each `poetry add` command, the `pyproject.toml` gets automatically updated so you don't need to keep that `requirements.txt` as part of this baseline. - - -More critically however, is adding your ML framework of choice to the list of dependencies. For some frameworks you might be able to do so with the `poetry add` command. Check [the Poetry documentation](https://python-poetry.org/docs/cli/#add) for how to add packages in various ways. For instance, let's say you want to use PyTorch: - -```bash -# with plain `pip` you'd run a command such as: -pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117 - -# to add the same 3 dependencies to your Poetry environment you'd need to add the URL to the wheel that the above pip command auto-resolves for you. -# You can find those wheels in `https://download.pytorch.org/whl/cu117`. Copy the link and paste it after the `poetry add` command. 
-# For instance to add `torch==1.13.1+cu117` and a x86 Linux system with Python3.8 you'd: -poetry add https://download.pytorch.org/whl/cu117/torch-1.13.1%2Bcu117-cp38-cp38-linux_x86_64.whl -# you'll need to repeat this for both `torchvision` and `torchaudio` -``` -The above is just an example of how you can add these dependencies. Please refer to the Poetry documentation to extra reference. - -If all attempts fail, you can still install packages via standard `pip`. You'd first need to source/activate your Poetry environment. -```bash -# first ensure you have created your environment -# and installed the base packages provided in the template -poetry install - -# then activate it -poetry shell -``` -Now you are inside your environment (pretty much as when you use `virtualenv` or `conda`) so you can install further packages with `pip`. Please note that, unlike with `poetry add`, these extra requirements won't be captured by `pyproject.toml`. Therefore, please ensure that you provide all instructions needed to: (1) create the base environment with Poetry and (2) install any additional dependencies via `pip` when you complete your `README.md`. \ No newline at end of file diff --git a/baselines/baseline_template/README.md b/baselines/baseline_template/README.md deleted file mode 100644 index ee6e1e96976f..000000000000 --- a/baselines/baseline_template/README.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: title of the paper -url: URL to the paper page (not the pdf) -labels: [label1, label2] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. system heterogeneity, image classification, asynchronous, weight sharing, cross-silo). Do not use "" -dataset: [dataset1, dataset2] # list of datasets you include in your baseline. Do not use "" ---- - -# :warning: *_Title of your baseline_* - -> Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. - -> :warning: This is the template to follow when creating a new Flower Baseline. Please follow the instructions in `EXTENDED_README.md` - -> :warning: Please follow the instructions carefully. You can see the [FedProx-MNIST baseline](https://github.com/adap/flower/tree/main/baselines/fedprox) as an example of a baseline that followed this guide. - -> :warning: Please complete the metadata section at the very top of this README. This generates a table at the top of the file that will facilitate indexing baselines. - -**Paper:** :warning: *_add the URL of the paper page (not to the .pdf). For instance if you link a paper on ArXiv, add here the URL to the abstract page (e.g. https://arxiv.org/abs/1512.03385). If your paper is in from a journal or conference proceedings, please follow the same logic._* - -**Authors:** :warning: *_list authors of the paper_* - -**Abstract:** :warning: *_add here the abstract of the paper you are implementing_* - - -## About this baseline - -**What’s implemented:** :warning: *_Concisely describe what experiment(s) in the publication can be replicated by running the code. Please only use a few sentences. Start with: “The code in this directory …”_* - -**Datasets:** :warning: *_List the datasets you used (if you used a medium to large dataset, >10GB please also include the sizes of the dataset)._* - -**Hardware Setup:** :warning: *_Give some details about the hardware (e.g. a server with 8x V100 32GB and 256GB of RAM) you used to run the experiments for this baseline. 
Someone out there might not have access to the same resources you have so, could list the absolute minimum hardware needed to run the experiment in a reasonable amount of time ? (e.g. minimum is 1x 16GB GPU otherwise a client model can’t be trained with a sufficiently large batch size). Could you test this works too?_* - -**Contributors:** :warning: *_let the world know who contributed to this baseline. This could be either your name, your name and affiliation at the time, or your GitHub profile name if you prefer. If multiple contributors signed up for this baseline, please list yourself and your colleagues_* - - -## Experimental Setup - -**Task:** :warning: *_what’s the primary task that is being federated? (e.g. image classification, next-word prediction). If you have experiments for several, please list them_* - -**Model:** :warning: *_provide details about the model you used in your experiments (if more than use a list). If your model is small, describing it as a table would be :100:. Some FL methods do not use an off-the-shelve model (e.g. ResNet18) instead they create your own. If this is your case, please provide a summary here and give pointers to where in the paper (e.g. Appendix B.4) is detailed._* - -**Dataset:** :warning: *_Earlier you listed already the datasets that your baseline uses. Now you should include a breakdown of the details about each of them. Please include information about: how the dataset is partitioned (e.g. LDA with alpha 0.1 as default and all clients have the same number of training examples; or each client gets assigned a different number of samples following a power-law distribution with each client only instances of 2 classes)? if your dataset is naturally partitioned just state “naturally partitioned”; how many partitions there are (i.e. how many clients)? Please include this an all information relevant about the dataset and its partitioning into a table._* - -**Training Hyperparameters:** :warning: *_Include a table with all the main hyperparameters in your baseline. Please show them with their default value._* - - -## Environment Setup - -:warning: _The Python environment for all baselines should follow these guidelines in the `EXTENDED_README`. Specify the steps to create and activate your environment. If there are any external system-wide requirements, please include instructions for them too. These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._ - - -## Running the Experiments - -:warning: _Provide instructions on the steps to follow to run all the experiments._ -```bash -# The main experiment implemented in your baseline using default hyperparameters (that should be setup in the Hydra configs) should run (including dataset download and necessary partitioning) by executing the command: - -poetry run python -m .main # where is the name of this directory and that of the only sub-directory in this directory (i.e. where all your source code is) - -# If you are using a dataset that requires a complicated download (i.e. not using one natively supported by TF/PyTorch) + preprocessing logic, you might want to tell people to run one script first that will do all that. Please ensure the download + preprocessing can be configured to suit (at least!) a different download directory (and use as default the current directory). 
The expected command to run to do this is: - -poetry run python -m .dataset_preparation - -# It is expected that you baseline supports more than one dataset and different FL settings (e.g. different number of clients, dataset partitioning methods, etc). Please provide a list of commands showing how these experiments are run. Include also a short explanation of what each one does. Here it is expected you'll be using the Hydra syntax to override the default config. - -poetry run python -m .main -. -. -. -poetry run python -m .main -``` - - -## Expected Results - -:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. Please add command followed by results for all your experiments._ - -```bash -# it is likely that for one experiment you need to sweep over different hyperparameters. You are encouraged to use Hydra's multirun functionality for this. This is an example of how you could achieve this for some typical FL hyperparameteres - -poetry run python -m .main --multirun num_client_per_round=5,10,50 dataset=femnist,cifar10 -# the above command will run a total of 6 individual experiments (because 3client_configs x 2datasets = 6 -- you can think of it as a grid). - -[Now show a figure/table displaying the results of the above command] - -# add more commands + plots for additional experiments. -``` diff --git a/baselines/baseline_template/baseline_template/__init__.py b/baselines/baseline_template/baseline_template/__init__.py deleted file mode 100644 index a5e567b59135..000000000000 --- a/baselines/baseline_template/baseline_template/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Template baseline package.""" diff --git a/baselines/baseline_template/baseline_template/client.py b/baselines/baseline_template/baseline_template/client.py deleted file mode 100644 index d2e2206111f3..000000000000 --- a/baselines/baseline_template/baseline_template/client.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Define your client class and a function to construct such clients. - -Please overwrite `flwr.client.NumPyClient` or `flwr.client.Client` and create a function -to instantiate your client. -""" diff --git a/baselines/baseline_template/baseline_template/conf/base.yaml b/baselines/baseline_template/baseline_template/conf/base.yaml deleted file mode 100644 index 2d65b3b989b2..000000000000 --- a/baselines/baseline_template/baseline_template/conf/base.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# this is the config that will be loaded as default by main.py -# Please follow the provided structure (this will ensuring all baseline follow -# a similar configuration structure and hence be easy to customise) - -dataset: - # dataset config - -model: - # model config - -strategy: - _target_: # points to your strategy (either custom or exiting in Flower) - # rest of strategy config - -client: - # client config diff --git a/baselines/baseline_template/baseline_template/dataset.py b/baselines/baseline_template/baseline_template/dataset.py deleted file mode 100644 index 5e436abe12fb..000000000000 --- a/baselines/baseline_template/baseline_template/dataset.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Handle basic dataset creation. - -In case of PyTorch it should return dataloaders for your dataset (for both the clients -and the server). 
If you are using a custom dataset class, this module is the place to -define it. If your dataset requires to be downloaded (and this is not done -automatically -- e.g. as it is the case for many dataset in TorchVision) and -partitioned, please include all those functions and logic in the -`dataset_preparation.py` module. You can use all those functions from functions/methods -defined here of course. -""" diff --git a/baselines/baseline_template/baseline_template/dataset_preparation.py b/baselines/baseline_template/baseline_template/dataset_preparation.py deleted file mode 100644 index bd3440b9276b..000000000000 --- a/baselines/baseline_template/baseline_template/dataset_preparation.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Handle the dataset partitioning and (optionally) complex downloads. - -Please add here all the necessary logic to either download, uncompress, pre/post-process -your dataset (or all of the above). If the desired way of running your baseline is to -first download the dataset and partition it and then run the experiments, please -uncomment the lines below and tell us in the README.md (see the "Running the Experiment" -block) that this file should be executed first. -""" -# import hydra -# from hydra.core.hydra_config import HydraConfig -# from hydra.utils import call, instantiate -# from omegaconf import DictConfig, OmegaConf - - -# @hydra.main(config_path="conf", config_name="base", version_base=None) -# def download_and_preprocess(cfg: DictConfig) -> None: -# """Does everything needed to get the dataset. - -# Parameters -# ---------- -# cfg : DictConfig -# An omegaconf object that stores the hydra config. -# """ - -# ## 1. print parsed config -# print(OmegaConf.to_yaml(cfg)) - -# # Please include here all the logic -# # Please use the Hydra config style as much as possible specially -# # for parts that can be customised (e.g. how data is partitioned) - -# if __name__ == "__main__": - -# download_and_preprocess() diff --git a/baselines/baseline_template/baseline_template/main.py b/baselines/baseline_template/baseline_template/main.py deleted file mode 100644 index 25ae1bec6a10..000000000000 --- a/baselines/baseline_template/baseline_template/main.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Create and connect the building blocks for your experiments; start the simulation. - -It includes processioning the dataset, instantiate strategy, specify how the global -model is going to be evaluated, etc. At the end, this script saves the results. -""" -# these are the basic packages you'll need here -# feel free to remove some if aren't needed -import hydra -from omegaconf import DictConfig, OmegaConf - - -@hydra.main(config_path="conf", config_name="base", version_base=None) -def main(cfg: DictConfig) -> None: - """Run the baseline. - - Parameters - ---------- - cfg : DictConfig - An omegaconf object that stores the hydra config. - """ - # 1. Print parsed config - print(OmegaConf.to_yaml(cfg)) - - # 2. Prepare your dataset - # here you should call a function in datasets.py that returns whatever is needed to: - # (1) ensure the server can access the dataset used to evaluate your model after - # aggregation - # (2) tell each client what dataset partitions they should use (e.g. a this could - # be a location in the file system, a list of dataloader, a list of ids to extract - # from a dataset, it's up to you) - - # 3. 
Define your clients - # Define a function that returns another function that will be used during - # simulation to instantiate each individual client - # client_fn = client.() - - # 4. Define your strategy - # pass all relevant argument (including the global dataset used after aggregation, - # if needed by your method.) - # strategy = instantiate(cfg.strategy, ) - - # 5. Start Simulation - # history = fl.simulation.start_simulation() - - # 6. Save your results - # Here you can save the `history` returned by the simulation and include - # also other buffers, statistics, info needed to be saved in order to later - # on generate the plots you provide in the README.md. You can for instance - # access elements that belong to the strategy for example: - # data = strategy.get_my_custom_data() -- assuming you have such method defined. - # Hydra will generate for you a directory each time you run the code. You - # can retrieve the path to that directory with this: - # save_path = HydraConfig.get().runtime.output_dir - - -if __name__ == "__main__": - main() diff --git a/baselines/baseline_template/baseline_template/models.py b/baselines/baseline_template/baseline_template/models.py deleted file mode 100644 index 71fa553d1f59..000000000000 --- a/baselines/baseline_template/baseline_template/models.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Define our models, and training and eval functions. - -If your model is 100% off-the-shelf (e.g. directly from torchvision without requiring -modifications) you might be better off instantiating your model directly from the Hydra -config. In this way, swapping your model for another one can be done without changing -the python code at all -""" diff --git a/baselines/baseline_template/baseline_template/server.py b/baselines/baseline_template/baseline_template/server.py deleted file mode 100644 index 2fd7d42cde5a..000000000000 --- a/baselines/baseline_template/baseline_template/server.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Create global evaluation function. - -Optionally, also define a new Server class (please note this is not needed in most -settings). -""" diff --git a/baselines/baseline_template/baseline_template/strategy.py b/baselines/baseline_template/baseline_template/strategy.py deleted file mode 100644 index 17436c401c30..000000000000 --- a/baselines/baseline_template/baseline_template/strategy.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Optionally define a custom strategy. - -Needed only when the strategy is not yet implemented in Flower or because you want to -extend or modify the functionality of an existing strategy. -""" diff --git a/baselines/baseline_template/baseline_template/utils.py b/baselines/baseline_template/baseline_template/utils.py deleted file mode 100644 index 9a831719d623..000000000000 --- a/baselines/baseline_template/baseline_template/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Define any utility function. - -They are not directly relevant to the other (more FL specific) python modules. For -example, you may define here things like: loading a model from a checkpoint, saving -results, plotting. 
-""" diff --git a/baselines/baseline_template/pyproject.toml b/baselines/baseline_template/pyproject.toml deleted file mode 100644 index 31f1ee7bfe6d..000000000000 --- a/baselines/baseline_template/pyproject.toml +++ /dev/null @@ -1,137 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.masonry.api" - -[tool.poetry] -name = "" # <----- Ensure it matches the name of your baseline directory containing all the source code -version = "1.0.0" -description = "Flower Baselines" -license = "Apache-2.0" -authors = ["The Flower Authors "] -readme = "README.md" -homepage = "https://flower.ai" -repository = "https://github.com/adap/flower" -documentation = "https://flower.ai" -classifiers = [ - "Development Status :: 3 - Alpha", - "Intended Audience :: Developers", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: POSIX :: Linux", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: Implementation :: CPython", - "Topic :: Scientific/Engineering", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Scientific/Engineering :: Mathematics", - "Topic :: Software Development", - "Topic :: Software Development :: Libraries", - "Topic :: Software Development :: Libraries :: Python Modules", - "Typing :: Typed", -] - -[tool.poetry.dependencies] -python = ">=3.8.15, <3.12.0" # don't change this -flwr = { extras = ["simulation"], version = "1.5.0" } -hydra-core = "1.3.2" # don't change this - -[tool.poetry.dev-dependencies] -isort = "==5.13.2" -black = "==24.2.0" -docformatter = "==1.7.5" -mypy = "==1.4.1" -pylint = "==2.8.2" -flake8 = "==3.9.2" -pytest = "==6.2.4" -pytest-watch = "==4.2.0" -ruff = "==0.0.272" -types-requests = "==2.27.7" - -[tool.isort] -line_length = 88 -indent = " " -multi_line_output = 3 -include_trailing_comma = true -force_grid_wrap = 0 -use_parentheses = true - -[tool.black] -line-length = 88 -target-version = ["py38", "py39", "py310", "py311"] - -[tool.pytest.ini_options] -minversion = "6.2" -addopts = "-qq" -testpaths = [ - "flwr_baselines", -] - -[tool.mypy] -ignore_missing_imports = true -strict = false -plugins = "numpy.typing.mypy_plugin" - -[tool.pylint."MESSAGES CONTROL"] -disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" -good-names = "i,j,k,_,x,y,X,Y" -signature-mutators = "hydra.main.main" - -[tool.pylint.typecheck] -generated-members = "numpy.*, torch.*, tensorflow.*" - -[[tool.mypy.overrides]] -module = [ - "importlib.metadata.*", - "importlib_metadata.*", -] -follow_imports = "skip" -follow_imports_for_stubs = true -disallow_untyped_calls = false - -[[tool.mypy.overrides]] -module = "torch.*" -follow_imports = "skip" -follow_imports_for_stubs = true - -[tool.docformatter] -wrap-summaries = 88 -wrap-descriptions = 88 - -[tool.ruff] -target-version = "py38" -line-length = 88 -select = ["D", "E", "F", "W", "B", "ISC", "C4"] -fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] -ignore = ["B024", "B027"] -exclude = [ - ".bzr", - ".direnv", - ".eggs", - ".git", - ".hg", - ".mypy_cache", - ".nox", - ".pants.d", - ".pytype", - ".ruff_cache", - ".svn", - ".tox", - ".venv", - "__pypackages__", - 
"_build", - "buck-out", - "build", - "dist", - "node_modules", - "venv", - "proto", -] - -[tool.ruff.pydocstyle] -convention = "numpy" diff --git a/baselines/dev/create-baseline.sh b/baselines/dev/create-baseline.sh deleted file mode 100755 index 53cd79c569aa..000000000000 --- a/baselines/dev/create-baseline.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# This script duplicates the `baseline_template` directory and changes its name -# to the one you specify when running this script. That name is also used to -# rename the subdirectory inside your new baseline directory as well as to set -# the Python package name that Poetry will build - -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ - -template="baseline_template" -name=$1 - -# copying directory -echo "Copying '$template' and renaming it to '$name'" -cp -r $template $name - -# renaming sub-directory -echo "Renaming sub-directory as '$name'" -mv $name/$template $name/$name - -# adjusting package name in pyproject.toml -cd $name -if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i '' -e "s//$name/" pyproject.toml -else - sed -i -e "s//$name/" pyproject.toml -fi - -echo "!!! Your directory for your baseline '$name' is ready." diff --git a/baselines/doc/source/conf.py b/baselines/doc/source/conf.py index ecc3482c6fce..974c264a6220 100644 --- a/baselines/doc/source/conf.py +++ b/baselines/doc/source/conf.py @@ -37,7 +37,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.10.0" +release = "1.11.0" # -- General configuration --------------------------------------------------- diff --git a/baselines/doc/source/how-to-contribute-baselines.rst b/baselines/doc/source/how-to-contribute-baselines.rst index b568e73f1c11..429ac714c1aa 100644 --- a/baselines/doc/source/how-to-contribute-baselines.rst +++ b/baselines/doc/source/how-to-contribute-baselines.rst @@ -6,16 +6,14 @@ Do you have a new federated learning paper and want to add a new baseline to Flo The goal of Flower Baselines is to reproduce experiments from popular papers to accelerate researchers by enabling faster comparisons to new strategies, datasets, models, and federated pipelines in general. Before you start to work on a new baseline or experiment, please check the `Flower Issues `_ or `Flower Pull Requests `_ to see if someone else is already working on it. Please open a new issue if you are planning to work on a new baseline or experiment with a short description of the corresponding paper and the experiment you want to contribute. +If you are proposing a brand new baseline, please indicate what experiments from the paper are planning to include. Requirements ------------ -Contributing a new baseline is really easy. You only have to make sure that your federated learning experiments are running with Flower and replicate the results of a paper. Flower baselines need to make use of: +Contributing a new baseline is really easy. You only have to make sure that your federated learning experiments run with Flower, use `Flower Datasets `_, and replicate the results of a paper. +Preferably, the baselines make use of PyTorch, but other ML frameworks are also welcome. The baselines are expected to run in a machine with Ubuntu 22.04, but if yours runs also on macOS even better! -* `Poetry `_ to manage the Python environment. -* `Hydra `_ to manage the configuration files for your experiments. 
- -You can find more information about how to setup Poetry in your machine in the ``EXTENDED_README.md`` that is generated when you prepare your baseline. Add a new Flower Baseline ------------------------- @@ -27,11 +25,18 @@ Let's say you want to contribute the code of your most recent Federated Learning #. **Get the Flower source code on your machine** #. Fork the Flower codebase: go to the `Flower GitHub repo `_ and fork the code (click the *Fork* button in the top-right corner and follow the instructions) #. Clone the (forked) Flower source code: :code:`git clone git@github.com:[your_github_username]/flower.git` - #. Open the code in your favorite editor. -#. **Use the provided script to create your baseline directory** - #. Navigate to the baselines directory and run :code:`./dev/create-baseline.sh fedawesome` - #. A new directory in :code:`baselines/fedawesome` is created. - #. Follow the instructions in :code:`EXTENDED_README.md` and :code:`README.md` in your baseline directory. +#. **Create a new baseline using the template** + #. Create a new Python environment with Python 3.10 (we recommend doing this with `pyenv `_) + #. Install flower with: :code:`pip install flwr`. + #. Navigate to the baselines directory and run: :code:`flwr new fedawesome`. When prompted, choose the option :code:`Flower Baseline`. + #. A new directory in :code:`baselines/fedawesome` is created with the structure needed for a Flower Baseline. + #. Follow the instructions in the :code:`README.md` in your baseline directory. + + .. tip:: + At this point, your baseline contains source code showing how a simple :code:`PyTorch+CIFAR10` project can be built with Flower. + You can run it directly by executing :code:`flwr run .` from inside the directory of your baseline. Update the code with that + needed to implement your baseline. + #. **Open a pull request** #. Stage your changes: :code:`git add .` #. Commit & push: :code:`git commit -m "Create new FedAwesome baseline" ; git push` @@ -49,15 +54,18 @@ Further reading: Usability --------- -Flower is known and loved for its usability. Therefore, make sure that your baseline or experiment can be executed with a single command such as: +Flower is known and loved for its usability. Therefore, make sure that your baseline or experiment can be executed with a single command after installing the baseline project: .. code-block:: bash - poetry run python -m .main - - # or, once sourced into your environment - python -m .main + # Install the baseline project + pip install -e . + + # Run the baseline using default config + flwr run . + + # Run the baseline overriding the config + flwr run . --run-config lr=0.01,num-server-rounds=200 -We provide you with a `template-baseline `_ to use as guidance when contributing your baseline. Having all baselines follow a homogenous structure helps users to tryout many baselines without the overheads of having to understand each individual codebase. Similarly, by using Hydra throughout, users will immediately know how to parameterise your experiments directly from the command line. -We look forward to your contribution! +We look forward to your contribution! \ No newline at end of file diff --git a/baselines/doc/source/how-to-use-baselines.rst b/baselines/doc/source/how-to-use-baselines.rst index 4704a9b6074e..ec65f8f7d5ee 100644 --- a/baselines/doc/source/how-to-use-baselines.rst +++ b/baselines/doc/source/how-to-use-baselines.rst @@ -5,7 +5,6 @@ Use Baselines We are changing the way we structure the Flower baselines. 
While we complete the transition to the new format, you can still find the existing baselines and use them: `baselines (old) `_. Currently, you can make use of baselines for `FedAvg `_, `FedOpt `_, and `LEAF-FEMNIST `_. - The documentation below has been updated to reflect the new way of using Flower baselines. Structure --------- @@ -15,87 +14,116 @@ All baselines are available in the directory `baselines / + ├── LICENSE ├── README.md - ├── pyproject.toml + ├── pyproject.toml # defines dependencies + ├── _static # optionally a directory to save plots └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files - -Please note that some baselines might include additional files (e.g. a :code:`requirements.txt`) or a hierarchy of :code:`.yaml` files for `Hydra `_. + └── *.py # several .py files Setting up your machine ----------------------- -.. note:: - Flower baselines are designed to run on Ubuntu 22.04. While a GPU is not required to run the baselines, some of the more computationally demanding ones do benefit from GPU acceleration. +.. tip:: + Flower baselines are designed to run on Ubuntu 22.04 and Python 3.10. While a GPU is not required to run the baselines, some of the more computationally demanding ones do benefit from GPU acceleration. + All baselines are expected to make use of `pyenv `_. -Common to all baselines is `Poetry `_, a tool to manage Python dependencies. Baselines also make use of `Pyenv `_. You'll need to install both on your system before running a baseline. What follows is a step-by-step guide on getting :code:`pyenv` and :code:`Poetry` installed on your system. +.. note:: + We are in the process of migrating all baselines to use `flwr run`. Those that haven't yet been migrated still make use of `Poetry `_, a tool to manage Python dependencies. + Identifying whether the baseline you want to run requires Poetry or not is easy: check if the `Environment Setup` section in the baseline readme mentions Poetry. + Follow the instructions later in this section if you need to setup Poetry in your system. -Let's begin by installing :code:`pyenv`. We'll be following the standard procedure. Please refer to the `pyenv docs `_ for alternative ways of installing it. +Let's begin by installing :code:`pyenv`. We'll be following the standard procedure. Please refer to the `pyenv docs `_ for alternative ways of installing it, including for platforms other than Ubuntu. .. 
code-block:: bash - # first install a few packages needed later for pyenv - sudo apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ - libreadline-dev libbz2-dev libffi-dev liblzma-dev + # first install a few packages needed later for pyenv + sudo apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ + libreadline-dev libbz2-dev libffi-dev liblzma-dev - # now clone pyenv into your home directory (this is the default way of installing pyenv) - git clone https://github.com/pyenv/pyenv.git ~/.pyenv + # now clone pyenv into your home directory (this is the default way of installing pyenv) + git clone https://github.com/pyenv/pyenv.git ~/.pyenv - # Then add pyenv to your path by adding the below to your .bashrc/.zshrc - export PYENV_ROOT="$HOME/.pyenv" - command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" - eval "$(pyenv init -)" + # Then add pyenv to your path by adding the below to your .bashrc/.zshrc + export PYENV_ROOT="$HOME/.pyenv" + command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" + eval "$(pyenv init -)" Verify your installation by opening a new terminal and .. code-block:: bash - # check python versions available - pyenv versions - # * system (...) # <-- it should just show one + # check python versions available + pyenv versions + # * system (...) # <-- it should just show one + +Then you can proceed and install any version of Python. Baselines use Python 3.10, so we'll be installing a recent version of it. + +.. code-block:: bash + + pyenv install 3.10.14 + # this will take a little while + # once done, you should see that that version is available + pyenv versions + # system + # * 3.10.14 # <-- you just installed this -Then you can proceed and install any version of Python. Most baselines currently use Python 3.10.6, so we'll be installing that one. +Next, let's install the :code:`virtualenv` plugin. Check `the documentation `_ for alternative installation methods. .. code-block:: bash - pyenv install 3.10.6 - # this will take a little while - # once done, you should see that that version is available - pyenv versions - # system - # * 3.10.6 # <-- you just installed this + # Clone `pyenv-virtualenv` + git clone https://github.com/pyenv/pyenv-virtualenv.git $(pyenv root)/plugins/pyenv-virtualenv + + # Restart your shell + exec "$SHELL" + -Now that we have :code:`pyenv` installed, we are ready to install :code:`poetry`. Installing Poetry can be done from a single command: +Using :code:`pyenv` +~~~~~~~~~~~~~~~~~~~ + +Creating a virtual environment can be done as follows: .. code-block:: bash - curl -sSL https://install.python-poetry.org | python3 - + # Create an environment for Python 3.10.14 named test-env + pyenv virtualenv 3.10.14 test-env + + # Then activate it + pyenv activate test-env + + # Deactivate it as follows + pyenv deactivate - # add to path by putting this line at the end of your .zshrc/.bashrc - export PATH="$HOME/.local/bin:$PATH" + +(optional) Setup Poetry +~~~~~~~~~~~~~~~~~~~~~~~ + +Now that we have :code:`pyenv` installed, we are ready to install :code:`poetry`. It can be done from a single command: + +.. code-block:: bash + + curl -sSL https://install.python-poetry.org | python3 - + + # add to path by putting this line at the end of your .zshrc/.bashrc + export PATH="$HOME/.local/bin:$PATH" To install Poetry from source, to customise your installation, or to further integrate Poetry with your shell after installation, please check `the Poetry documentation `_. 
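If you want to double-check that the interpreter in use is really the one you just created and activated, a tiny script is enough. The snippet below is only an illustrative sketch based on the Python 3.10 requirement mentioned above; it is not part of the baselines tooling.

.. code-block:: python

    # Sanity-check the active interpreter (illustrative only)
    import sys

    # With the pyenv virtualenv activated, `python` should resolve to 3.10.x
    if sys.version_info[:2] != (3, 10):
        raise SystemExit(f"Expected Python 3.10, found {sys.version.split()[0]}")
    print(f"OK: Python {sys.version.split()[0]} at {sys.executable}")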
+ Using a Flower Baseline ----------------------- -To use Flower Baselines you need first to install :code:`pyenv` and :code:`Poetry`, then: +To use Flower Baselines you need first to install :code:`pyenv` and, depending on the baselines, also :code:`Poetry`, then: 1. Clone the flower repository .. code-block:: bash - git clone https://github.com/adap/flower.git && cd flower + git clone https://github.com/adap/flower.git && cd flower 2. Navigate inside the directory of the baseline you'd like to run -3. Follow the :code:`[Environment Setup]` instructions in the :code:`README.md`. In most cases this will require you to just do: - -.. code-block:: bash - - poetry install - -4. Run the baseline as indicated in the :code:`[Running the Experiments]` section in the :code:`README.md` or in the `[Expected Results]` section to reproduce the experiments in the paper. +3. Follow the :code:`[Environment Setup]` instructions in the :code:`README.md`. +4. Run the baseline as indicated in the :code:`[Running the Experiments]` section in the :code:`README.md` or in the :code:`[Expected Results]` section to reproduce the experiments in the paper. diff --git a/benchmarks/flowertune-llm/README.md b/benchmarks/flowertune-llm/README.md index 0cb69e7ff9c7..45cd8a828a89 100644 --- a/benchmarks/flowertune-llm/README.md +++ b/benchmarks/flowertune-llm/README.md @@ -1,4 +1,4 @@ -![](_static/flower_llm.jpg) +[![FlowerTune LLM Leaderboard](_static/flower_llm.png)](https://flower.ai/benchmarks/llm-leaderboard) # FlowerTune LLM Leaderboard @@ -9,39 +9,41 @@ Please follow the instructions to run and evaluate the federated LLMs. ## Create a new project -As the first step, please register a Flower account on [Flower website](https://flower.ai/login). -Assuming `flwr` package is already installed on your system (check [here](https://flower.ai/docs/framework/how-to-install-flower.html) for `flwr` installation). -We provide a single-line command to create a new project directory based on your selected challenge: +As the first step, please register for a Flower account on [flower.ai/login](https://flower.ai/login). +Then, create a new Python environment and install Flower. + +> [!TIP] +> We recommend using `pyenv` with the `virtualenv` plugin to create your environment. Other managers, such as Conda, will likely work as well. Check the [documentation](https://flower.ai/docs/framework/how-to-install-flower.html) for alternative ways to install Flower. ```shell -flwr new --framework=flwrtune --username=your_flower_account +pip install flwr ``` -Then you will see a prompt to ask your project name and the choice of LLM challenges from the set of general NLP, finance, medical and code. -Type your project name and select your preferred challenge, -and then a new project directory will be generated automatically. - -### Structure +In the new environment, create a new Flower project using the `FlowerTune` template. 
You will be prompted for a name to give to your project, your username, and for your choice of LLM challenge: +```shell +flwr new --framework=FlowerTune +``` -After running `flwr new`, you will see a new directory generated with the following structure: +The `flwr new` command will generate a directory with the following structure: ```bash - - ├── README.md # <- Instructions - ├── pyproject.toml # <- Environment dependencies - └── - ├── app.py # <- Flower ClientApp/ServerApp build - ├── client.py # <- Flower client constructor - ├── server.py # <- Sever-related functions - ├── models.py # <- Model build - ├── dataset.py # <- Dataset and tokenizer build - ├── conf/config.yaml # <- User configuration - └── conf/static_config.yaml # <- Static configuration + +├── README.md # Instructions +├── pyproject.toml # Environment dependencies and configs +└── + ├── __init__.py + ├── client_app.py # Flower ClientApp build + ├── dataset.py # Dataset and tokenizer build + ├── models.py # Model build + ├── server_app.py # Flower ServerApp build + └── strategy.py # Flower strategy build ``` This can serve as the starting point for you to build up your own federated LLM fine-tuning methods. -Please note that any modification to the content of `conf/static_config.yaml` is strictly prohibited for those who wish to participate in the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard). -Otherwise, the submission will not be considered. + +> [!IMPORTANT] +> Please note that if you intend to submit your project as an entry to the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard) modifications to `[tool.flwr.app.config.static]` and `[tool.flwr.federations.local-simulation]` sections in the `pyproject.toml` are not allowed and will invalidate the submission. + ## Run FlowerTune LLM challenges @@ -50,12 +52,17 @@ With a new project directory created, running a baseline challenge can be done b 1. Navigate inside the directory that you just created. -2. Follow the `Environments setup` section of `README.md` in the project directory to install project dependencies. +2. Follow the `Environments setup` section of `README.md` in the project directory to install the project dependencies. 3. Run the challenge as indicated in the `Running the challenge` section in the `README.md`. -## Evaluate pre-trained LLMs +## Evaluate fine-tuned LLMs + +Once the LLM fine-tuning finished, evaluate the performance of your fine-tuned LLM +following the `README.md` in [`evaluation`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation) directory. + -After the LLM fine-tuning finished, evaluate the performance of your pre-trained LLMs -following the `README.md` in `evaluation` directory. +> [!NOTE] +> If you have any questions about running FlowerTune LLM challenges or evaluation, please feel free to make posts at [Flower Discuss](https://discuss.flower.ai) forum, +or join our [Slack channel](https://flower.ai/join-slack/) to ask questions in the `#flowertune-llm-leaderboard` channel. 
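To make the generated layout described above more concrete, the sketch below shows roughly how a `client_app.py`/`server_app.py` pair fits together. It is a minimal illustration assuming the `Context`-based `ClientApp`/`ServerApp` APIs of recent Flower releases; the actual template additionally wires in the LLM, PEFT adapters, dataset, and strategy code, so treat all names and defaults here as placeholders.

```python
# Minimal sketch (not the actual template) of how the generated modules fit together
from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg


class FlowerClient(NumPyClient):
    def get_parameters(self, config):
        return []  # the real template returns the trainable PEFT weights

    def fit(self, parameters, config):
        # the real client_app.py fine-tunes the LLM locally here
        return parameters, 1, {}

    def evaluate(self, parameters, config):
        return 0.0, 1, {}


def client_fn(context: Context):
    return FlowerClient().to_client()


def server_fn(context: Context):
    # values from [tool.flwr.app.config] in pyproject.toml arrive via run_config
    num_rounds = int(context.run_config.get("num-server-rounds", 3))
    return ServerAppComponents(
        strategy=FedAvg(), config=ServerConfig(num_rounds=num_rounds)
    )


app = ClientApp(client_fn=client_fn)      # exported by client_app.py
server = ServerApp(server_fn=server_fn)   # exported by server_app.py
```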
diff --git a/benchmarks/flowertune-llm/_static/flower_llm.jpg b/benchmarks/flowertune-llm/_static/flower_llm.jpg deleted file mode 100644 index 96081d9c2ad1..000000000000 Binary files a/benchmarks/flowertune-llm/_static/flower_llm.jpg and /dev/null differ diff --git a/benchmarks/flowertune-llm/_static/flower_llm.png b/benchmarks/flowertune-llm/_static/flower_llm.png new file mode 100644 index 000000000000..e9a0ba3bf30e Binary files /dev/null and b/benchmarks/flowertune-llm/_static/flower_llm.png differ diff --git a/benchmarks/flowertune-llm/evaluation/README.md b/benchmarks/flowertune-llm/evaluation/README.md new file mode 100644 index 000000000000..c99ad640203a --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/README.md @@ -0,0 +1,49 @@ +# FlowerTune LLM Evaluation + +This directory provides various evaluation metrics to assess the quality of your fine-tuned LLMs. +If you are participating in the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard), evaluating your fine-tuned LLM is the final step prior to having your submission added to the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard#how-to-participate). The evaluation scores generated here will be displayed as the definitive values on the LLM Leaderboard. + +## How to run + +Navigate to the directory corresponding to your selected challenge ([`general NLP`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/general-nlp), [`finance`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/finance), [`medical`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/medical), or [`code`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/code)) and follow the instructions there to execute the evaluation. + +> [!NOTE] +> If you wish to participate in the LLM Leaderboard, you must not modify the evaluation code and should use the exact command provided in the respective directory to run the evaluation. + + +## Baseline results + +The default template generated by `flwr new` (see the [Project Creation Instructions](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm#create-a-new-project)) for each challenge will produce results as follows, which serve as the lower bound on the LLM Leaderboard. + +### General NLP + +| | MT-1 | MT-2 | MT-Avg | +|:--------:|:----:|:----:|:------:| +| MT Score | 5.54 | 5.52 | 5.53 | + +### Finance + +| | FPB | FIQA | TFNS | Avg | +|:-------:|:-----:|:-----:|:-----:|:-----:| +| Acc (%) | 44.55 | 62.50 | 28.77 | 45.27 | + +### Medical + +| | PubMedQA | MedMCQA | MedQA | Avg | +|:-------:|:--------:|:-------:|:-----:|:-----:| +| Acc (%) | 59.00 | 23.69 | 27.10 | 36.60 | + +### Code + +| | MBPP | HumanEval | MultiPL-E (JS) | MultiPL-E (C++) | Avg | +|:----------:|:-----:|:---------:|:--------------:|:---------------:|:-----:| +| Pass@1 (%) | 31.60 | 23.78 | 28.57 | 25.47 | 27.36 | + +> [!NOTE] +> In the LLM Leaderboard, we rank the submissions based on the **average** value derived from different evaluation datasets for each challenge. + + +## Make submission on FlowerTune LLM Leaderboard + +If your LLM outperforms the listed benchmarks in any challenge, +we encourage you to submit your code and model to the FlowerTune LLM Leaderboard without hesitation (see the [How-to-participate Instructions](https://flower.ai/benchmarks/llm-leaderboard#how-to-participate)).
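As noted above, submissions are ranked by the plain arithmetic mean of the per-dataset scores for a challenge. The tiny snippet below (illustrative only, not part of the evaluation code) reproduces the `Avg` column of the finance baseline row from the table:

```python
# Reproduce the "Avg" column of the finance baseline row above
scores = {"FPB": 44.55, "FIQA": 62.50, "TFNS": 28.77}
average = sum(scores.values()) / len(scores)
print(f"Average accuracy: {average:.2f}%")  # -> 45.27%
```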
diff --git a/benchmarks/flowertune-llm/evaluation/code/README.md b/benchmarks/flowertune-llm/evaluation/code/README.md new file mode 100644 index 000000000000..fd63ced2f1e2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/code/README.md @@ -0,0 +1,70 @@ +# Evaluation for Code challenge + +We leverage the code generation evaluation metrics provided by [bigcode-evaluation-harness](https://github.com/bigcode-project/bigcode-evaluation-harness/tree/main) to evaluate our fine-tuned LLMs. +Three datasets have been selected for this evaluation: [MBPP](https://huggingface.co/datasets/google-research-datasets/mbpp) (Python), [HumanEval](https://huggingface.co/datasets/openai/openai_humaneval) (Python), and [MultiPL-E](https://github.com/nuprl/MultiPL-E) (JavaScript, C++). + +> [!WARNING] +> The evaluation process takes ~30 GB VRAM. On a 40GB A100 it requires 15-30mins depending on the dataset to complete. + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/code ./flowertune-eval-code && rm -rf flower && cd flowertune-eval-code +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +After that, install `Node.js` and `g++` for the evaluation of JavaScript, C++: + +```shell +# Install nvm (Node Version Manager) +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash + +# Restart your terminal + +# Download and install Node.js (you may need to restart the terminal) +nvm install 20 + +# Install g++ +sudo apt-get install g++ +``` + +Then, download the `main.py` script from `bigcode-evaluation-harness` repository. + +```shell +git clone https://github.com/bigcode-project/bigcode-evaluation-harness.git && cd bigcode-evaluation-harness && git checkout 0f3e95f0806e78a4f432056cdb1be93604a51d69 && mv main.py ../ && cd .. && rm -rf bigcode-evaluation-harness +``` + + +## Generate model answers & calculate pass@1 score + +> [!NOTE] +> Evaluation needs to be run on MBPP, HumanEval, MultiPL-E (JS) and MultiPL-E (C++). + +```bash +python main.py \ +--model=mistralai/Mistral-7B-v0.3 \ +--peft_model=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--max_length_generation=1024 \ # change to 2048 when running mbpp +--batch_size=4 \ +--use_auth_token \ +--allow_code_execution \ +--save_generations \ +--save_references \ +--tasks=humaneval \ # chosen from [mbpp, humaneval, multiple-js, multiple-cpp] +--metric_output_path=./evaluation_results_humaneval.json # change dataset name based on your choice +``` + +The model answers and pass@1 scores will be saved to `generations_{dataset_name}.json` and `evaluation_results_{dataset_name}.json`, respectively. + + +> [!NOTE] +> Please ensure that you provide all **four pass@1 scores** for the evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). 
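Since the command has to be repeated for all four tasks (with a longer `--max_length_generation` for MBPP), a small driver loop can save some typing. The sketch below is a hypothetical convenience wrapper, not part of the harness; it simply re-issues the command shown above for each task, so adjust the PEFT path placeholder to your own directory.

```python
# Hypothetical convenience loop around the main.py command from this README
import subprocess

PEFT_PATH = "/path/to/fine-tuned-peft-model-dir/"  # e.g., ./peft_1

for task in ["mbpp", "humaneval", "multiple-js", "multiple-cpp"]:
    max_len = 2048 if task == "mbpp" else 1024  # MBPP needs longer generations
    subprocess.run(
        [
            "python", "main.py",
            "--model=mistralai/Mistral-7B-v0.3",
            f"--peft_model={PEFT_PATH}",
            f"--max_length_generation={max_len}",
            "--batch_size=4",
            "--use_auth_token",
            "--allow_code_execution",
            "--save_generations",
            "--save_references",
            f"--tasks={task}",
            f"--metric_output_path=./evaluation_results_{task}.json",
        ],
        check=True,
    )
```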
diff --git a/benchmarks/flowertune-llm/evaluation/code/requirements.txt b/benchmarks/flowertune-llm/evaluation/code/requirements.txt new file mode 100644 index 000000000000..74b5b79d634d --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/code/requirements.txt @@ -0,0 +1,7 @@ +peft==0.6.2 +datasets==2.20.0 +evaluate==0.3.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +git+https://github.com/bigcode-project/bigcode-evaluation-harness.git@0f3e95f0806e78a4f432056cdb1be93604a51d69 diff --git a/benchmarks/flowertune-llm/evaluation/finance/README.md b/benchmarks/flowertune-llm/evaluation/finance/README.md new file mode 100644 index 000000000000..b5595433a238 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/README.md @@ -0,0 +1,40 @@ +# Evaluation for Finance challenge + +We build a sentiment classification pipeline on finance-related text to evaluate our fine-tuned LLMs. +Three datasets have been selected for this evaluation: [FPB](https://huggingface.co/datasets/takala/financial_phrasebank), [FIQA](https://huggingface.co/datasets/pauri32/fiqa-2018), and [TFNS](https://huggingface.co/datasets/zeroshot/twitter-financial-news-sentiment). + + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/finance ./flowertune-eval-finance && rm -rf flower && cd flowertune-eval-finance +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. + +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=32 \ +--quantization=4 \ +--datasets=fpb,fiqa,tfns +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{run_name}.txt`, respectively. + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (FPB, FIQA, TFNS)** for three evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). 
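After running `eval.py` on all three datasets, the three accuracy values you need to report live in the `benchmarks/acc_{dataset_name}_{run_name}.txt` files mentioned above. The snippet below is an optional, illustrative helper (not part of the benchmark) that collects them and prints their average; it assumes the default `--run-name=fl` and the `Accuracy: <value>.` format these files use.

```python
# Collect the per-dataset accuracies written by eval.py (illustrative helper)
import re
from pathlib import Path

RUN_NAME = "fl"  # must match the --run-name passed to eval.py
accs = {}
for dataset in ("fpb", "fiqa", "tfns"):
    text = Path(f"benchmarks/acc_{dataset}_{RUN_NAME}.txt").read_text()
    accs[dataset] = float(re.search(r"Accuracy:\s*([0-9]+\.[0-9]+)", text).group(1))

print(accs)
print("average:", sum(accs.values()) / len(accs))
```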
diff --git a/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py new file mode 100644 index 000000000000..2b1a174e571f --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py @@ -0,0 +1,135 @@ +import torch +from sklearn.metrics import accuracy_score +from tqdm import tqdm +from utils import ( + add_instruct, + change_target, + format_example, + generate_label, + load_data, + save_results, +) + + +def infer_fiqa(model, tokenizer, batch_size, run_name): + name = "fiqa" + dataset = load_data("pauri32/fiqa-2018", concat=True) + + # Post process + dataset["output"] = dataset.sentiment_score.apply(generate_label) + dataset["instruction"] = dataset.apply(add_instruct, axis=1) + dataset = dataset[["sentence", "output", "instruction"]] + dataset.columns = ["input", "output", "instruction"] + + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # Print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def infer_fpb(model, tokenizer, batch_size, run_name): + name = "fpb" + dataset = load_data("takala/financial_phrasebank", "sentences_50agree") + + # Post process + dataset.columns = ["input", "output"] + dic = {0: "negative", 1: "neutral", 2: "positive"} + dataset["output"] = dataset["output"].apply(lambda x: dic[x]) + + dataset["instruction"] = ( + "What is the sentiment of this news? Please choose an answer from {negative/neutral/positive}." + ) + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # Print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def infer_tfns(model, tokenizer, batch_size, run_name): + name = "tfns" + dataset = load_data( + "zeroshot/twitter-financial-news-sentiment", valid_set="validation" + ) + + # Post process + dic = {0: "negative", 1: "positive", 2: "neutral"} + dataset["label"] = dataset["label"].apply(lambda x: dic[x]) + + dataset["instruction"] = ( + "What is the sentiment of this tweet? Please choose an answer from {negative/neutral/positive}." + ) + + dataset.columns = ["input", "output", "instruction"] + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def inference(dataset, model, tokenizer, batch_size): + context = dataset["context"].tolist() + + last_batch = dataset.shape[0] % batch_size + total_steps = dataset.shape[0] // batch_size + 1 + print( + f"Total len: {len(context)}. Batch size: {batch_size}. 
Total steps: {total_steps}" + ) + + out_text_list = [] + for i in tqdm(range(total_steps)): + idx_s = i * batch_size + tmp_context = ( + context[idx_s : idx_s + last_batch] + if i == total_steps - 1 + else context[idx_s : idx_s + batch_size] + ) + + if tmp_context: + tokens = tokenizer( + tmp_context, + return_tensors="pt", + padding=True, + max_length=512, + return_token_type_ids=False, + ) + for k in tokens.keys(): + tokens[k] = tokens[k].cuda() + res = model.generate( + **tokens, max_length=512, eos_token_id=tokenizer.eos_token_id + ) + res_sentences = [tokenizer.decode(i, skip_special_tokens=True) for i in res] + out_text = [o.split("Answer: ")[1] for o in res_sentences] + out_text_list += out_text + torch.cuda.empty_cache() + + dataset["out_text"] = out_text_list + dataset["new_target"] = dataset["target"].apply(change_target) + dataset["new_out"] = dataset["out_text"].apply(change_target) + + acc = accuracy_score(dataset["new_target"], dataset["new_out"]) + + return dataset, acc diff --git a/benchmarks/flowertune-llm/evaluation/finance/eval.py b/benchmarks/flowertune-llm/evaluation/finance/eval.py new file mode 100644 index 000000000000..3e85b2fe21af --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/eval.py @@ -0,0 +1,64 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import infer_fiqa, infer_fpb, infer_tfns + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument("--datasets", type=str, default="fpb") +parser.add_argument("--batch-size", type=int, default=32) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. 
You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +if not tokenizer.pad_token or tokenizer.pad_token_id == tokenizer.eos_token_id: + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + model.resize_token_embeddings(len(tokenizer)) + + +# Evaluate +model = model.eval() +with torch.no_grad(): + for dataset in args.datasets.split(","): + if dataset == "fpb": + infer_fpb(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "fiqa": + infer_fiqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "tfns": + infer_tfns(model, tokenizer, args.batch_size, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/finance/requirements.txt b/benchmarks/flowertune-llm/evaluation/finance/requirements.txt new file mode 100644 index 000000000000..be4cba99fa9e --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/requirements.txt @@ -0,0 +1,6 @@ +peft==0.6.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 diff --git a/benchmarks/flowertune-llm/evaluation/finance/utils.py b/benchmarks/flowertune-llm/evaluation/finance/utils.py new file mode 100644 index 000000000000..900d1de3e096 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/utils.py @@ -0,0 +1,70 @@ +import os + +import datasets +from datasets import Dataset + + +def load_data(dataset_path, name=None, concat=False, valid_set=None): + dataset = datasets.load_dataset(dataset_path, name, trust_remote_code=True) + + if concat: + dataset = datasets.concatenate_datasets( + [dataset["train"], dataset["validation"], dataset["test"]] + ) + + if valid_set: + dataset = dataset[valid_set] + else: + dataset = dataset if concat else dataset["train"] + dataset = dataset.train_test_split(0.25, seed=42)["test"] + + dataset = dataset.to_pandas() + + return dataset + + +def format_example(example: dict): + context = f"Instruction: {example['instruction']}\n" + if example.get("input"): + context += f"Input: {example['input']}\n" + context += "Answer: " + target = example["output"] + return {"context": context, "target": target} + + +def generate_label(value): + return "negative" if value < -0.1 else "neutral" if value < 0.1 else "positive" + + +def add_instruct(content): + tag = "tweet" if content.format == "post" else "news" + return f"What is the sentiment of this {tag}? Please choose an answer from {{negative/neutral/positive}}." + + +def change_target(x): + if "positive" in x or "Positive" in x: + return "positive" + elif "negative" in x or "Negative" in x: + return "negative" + else: + return "neutral" + + +def save_results(dataset_name, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + results_path = os.path.join(path, f"acc_{dataset_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. 
") + + # Save generations + generation_path = os.path.join(path, f"generation_{dataset_name}_{run_name}.jsonl") + dataset = Dataset.from_pandas(dataset) + dataset = dataset.remove_columns( + ["input", "output", "instruction", "target", "out_text"] + ) + dataset.to_json(generation_path, orient="records") diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/README.md b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md new file mode 100644 index 000000000000..18666968108d --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md @@ -0,0 +1,63 @@ +# Evaluation for General NLP challenge + +We leverage MT-Bench metric provided by [FastChat](https://github.com/lm-sys/FastChat) to evaluate fine-tuned LLMs. +[MT-Bench](https://arxiv.org/abs/2306.05685) represents a comprehensive suite of multi-turn, open-ended questions designed to evaluate chat assistants. +Strong LLMs, such as GPT-4, serve as judges to assess the quality of responses provided by the chat assistants under examination. + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/general-nlp ./flowertune-eval-general-nlp && rm -rf flower && cd flowertune-eval-general-nlp +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +Download data from [FastChat](https://github.com/lm-sys/FastChat): + +```shell +git clone https://github.com/lm-sys/FastChat.git && cd FastChat && git checkout d561f87b24de197e25e3ddf7e09af93ced8dfe36 && mv fastchat/llm_judge/data ../data && cd .. && rm -rf FastChat +``` + + +## Generate model answers from MT-bench questions + +```bash +python gen_model_answer.py --peft-path=/path/to/fine-tuned-peft-model-dir/ # e.g., ./peft_1 +``` +The answers will be saved to `data/mt_bench/model_answer/[base_model_name].jsonl` in default. + + +## Generate judgments using GPT-4 + +Please follow these [instructions](https://platform.openai.com/docs/quickstart/developer-quickstart) to create a OpenAI API key. +The estimated costs of running this evaluation is approximately USD10. + +> [!NOTE] +> If you changed the base model of your LLM project specify it to the command below via `--model-list`. + +```bash +export OPENAI_API_KEY=XXXXXX # set the OpenAI API key +python gen_judgement.py --model-list Mistral-7B-v0.3 +``` + +The judgments will be saved to `data/mt_bench/model_judgment/gpt-4_single.jsonl` in default. + + +## Show MT-bench scores + +```bash +python show_result.py --model-list Mistral-7B-v0.3 +``` +GPT-4 will give a score on a scale of 10 to the first-turn (MT-1) and second-turn (MT-2) of the conversations, along with an average value as the third score. + +> [!NOTE] +> Please ensure that you provide all **three scores** when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). 
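Because the judging step calls a paid API, it can be worth sanity-checking the generated answer file before running it. The snippet below is an illustrative check (not part of FastChat); it assumes the default `Mistral-7B-v0.3` base model and the JSONL fields written by `gen_model_answer.py` (`question_id`, `choices`, `turns`).

```python
# Verify every MT-Bench answer has two non-empty turns before paying for GPT-4 judging
import json

answer_file = "data/mt_bench/model_answer/Mistral-7B-v0.3.jsonl"
with open(answer_file) as f:
    answers = [json.loads(line) for line in f]

incomplete = [
    a["question_id"]
    for a in answers
    if len(a["choices"][0]["turns"]) < 2
    or any(t.strip() in ("", "ERROR") for t in a["choices"][0]["turns"])
]
print(f"{len(answers)} answers; problematic question IDs: {incomplete}")
```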
+ diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/gen_judgement.py b/benchmarks/flowertune-llm/evaluation/general-nlp/gen_judgement.py new file mode 100644 index 000000000000..14ad3c7c6544 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/gen_judgement.py @@ -0,0 +1,130 @@ +""" +This python file is adapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/llm_judge/gen_judgment.py + +FastChat (https://github.com/lm-sys/FastChat) is licensed under the Apache License, Version 2.0. + +Citation: +@misc{zheng2023judging, + title={Judging LLM-as-a-judge with MT-Bench and Chatbot Arena}, + author={Lianmin Zheng and Wei-Lin Chiang and Ying Sheng and Siyuan Zhuang and Zhanghao Wu + and Yonghao Zhuang and Zi Lin and Zhuohan Li and Dacheng Li and Eric. P Xing and Hao Zhang + and Joseph E. Gonzalez and Ion Stoica}, + year={2023}, + eprint={2306.05685}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""" + +import argparse +import json + +from fastchat.llm_judge.common import ( + NEED_REF_CATS, + check_data, + get_model_list, + load_judge_prompts, + load_model_answers, + load_questions, + play_a_match_single, +) +from fastchat.llm_judge.gen_judgment import make_judge_single, make_match_single +from tqdm import tqdm + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--judge-file", + type=str, + default="data/judge_prompts.jsonl", + help="The file of judge prompts.", + ) + parser.add_argument("--judge-model", type=str, default="gpt-4") + parser.add_argument( + "--model-list", + type=str, + nargs="+", + default=None, + help="A list of models to be evaluated", + ) + args = parser.parse_args() + + question_file = "data/mt_bench/question.jsonl" + answer_dir = "data/mt_bench/model_answer" + ref_answer_dir = "data/mt_bench/reference_answer" + + # Load questions + questions = load_questions(question_file, None, None) + + # Load answers + model_answers = load_model_answers(answer_dir) + ref_answers = load_model_answers(ref_answer_dir) + + # Load judge + judge_prompts = load_judge_prompts(args.judge_file) + + if args.model_list is None: + models = get_model_list(answer_dir) + else: + models = args.model_list + + judges = make_judge_single(args.judge_model, judge_prompts) + play_a_match_func = play_a_match_single + output_file = f"data/mt_bench/model_judgment/{args.judge_model}_single.jsonl" + make_match_func = make_match_single + baseline_model = None + + check_data(questions, model_answers, ref_answers, models, judges) + + question_math = [q for q in questions if q["category"] in NEED_REF_CATS] + question_default = [q for q in questions if q["category"] not in NEED_REF_CATS] + + # Make matches + matches = [] + matches += make_match_func( + question_default, models, model_answers, judges["default"], baseline_model + ) + matches += make_match_func( + question_math, + models, + model_answers, + judges["math"], + baseline_model, + ref_answers, + ) + matches += make_match_func( + question_default, + models, + model_answers, + judges["default-mt"], + baseline_model, + multi_turn=True, + ) + matches += make_match_func( + question_math, + models, + model_answers, + judges["math-mt"], + baseline_model, + ref_answers, + multi_turn=True, + ) + + match_stat = {} + match_stat["bench_name"] = "mt_bench" + match_stat["mode"] = "single" + match_stat["judge"] = args.judge_model + match_stat["baseline"] = baseline_model + match_stat["model_list"] = models + match_stat["total_num_questions"] = len(questions) + match_stat["total_num_matches"] = 
len(matches) + match_stat["output_path"] = output_file + + # Show match stats and prompt enter to continue + print("Stats:") + print(json.dumps(match_stat, indent=4)) + input("Press Enter to confirm...") + + # Play matches + for match in tqdm(matches): + play_a_match_func(match, output_file=output_file) diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/gen_model_answer.py b/benchmarks/flowertune-llm/evaluation/general-nlp/gen_model_answer.py new file mode 100644 index 000000000000..cefb4fbff08d --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/gen_model_answer.py @@ -0,0 +1,135 @@ +""" +This python file is adapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/llm_judge/gen_model_answer.py + +FastChat (https://github.com/lm-sys/FastChat) is licensed under the Apache License, Version 2.0. + +Citation: +@misc{zheng2023judging, + title={Judging LLM-as-a-judge with MT-Bench and Chatbot Arena}, + author={Lianmin Zheng and Wei-Lin Chiang and Ying Sheng and Siyuan Zhuang and Zhanghao Wu + and Yonghao Zhuang and Zi Lin and Zhuohan Li and Dacheng Li and Eric. P Xing and Hao Zhang + and Joseph E. Gonzalez and Ion Stoica}, + year={2023}, + eprint={2306.05685}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""" + +import argparse +import json +import os +import random +import time + +import torch +from fastchat.conversation import get_conv_template +from fastchat.llm_judge.common import load_questions, temperature_config +from peft import AutoPeftModelForCausalLM, PeftModel +from tqdm import tqdm +from transformers import AutoModelForCausalLM, AutoTokenizer + +parser = argparse.ArgumentParser() +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument("--template", type=str, default="vicuna_v1.1") +parser.add_argument("--max-new-token", type=int, default=1024) +parser.add_argument("--num-choices", type=int, default=1) +args = parser.parse_args() + +# Load model and tokenizer +model = AutoPeftModelForCausalLM.from_pretrained( + args.peft_path, torch_dtype=torch.float16 +).to("cuda") +base_model = model.peft_config["default"].base_model_name_or_path +tokenizer = AutoTokenizer.from_pretrained(base_model) + +model_name = base_model.split("/")[1] +question_file = f"./data/mt_bench/question.jsonl" +answer_file = f"./data/mt_bench/model_answer/{model_name}.jsonl" + +# Load questions +questions = load_questions(question_file, None, None) +# Random shuffle the questions to balance the loading +random.shuffle(questions) + +# Generate answers +for question in tqdm(questions): + # Set temperature value + temperature = ( + temperature_config[question["category"]] + if question["category"] in temperature_config + else 0.7 + ) + choices = [] + for i in range(args.num_choices): + torch.manual_seed(i) + conv = get_conv_template(args.template) + turns = [] + for j in range(len(question["turns"])): + qs = question["turns"][j] + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + input_ids = tokenizer([prompt]).input_ids + + do_sample = False if temperature < 1e-4 else True + + # Some models may error out when generating long outputs + try: + output_ids = model.generate( + input_ids=torch.as_tensor(input_ids).cuda(), + do_sample=do_sample, + temperature=temperature, + max_new_tokens=args.max_new_token, + pad_token_id=tokenizer.eos_token_id, + ) + output_ids = ( + output_ids[0] + if model.config.is_encoder_decoder + else output_ids[0][len(input_ids[0]) :] + ) + + # Be consistent with the 
template's stop_token_ids + if conv.stop_token_ids: + stop_token_ids_index = [ + i + for i, id in enumerate(output_ids) + if id in conv.stop_token_ids + ] + if len(stop_token_ids_index) > 0: + output_ids = output_ids[: stop_token_ids_index[0]] + + output = tokenizer.decode( + output_ids, + spaces_between_special_tokens=False, + ) + if conv.stop_str and output.find(conv.stop_str) > 0: + output = output[: output.find(conv.stop_str)] + for special_token in tokenizer.special_tokens_map.values(): + if isinstance(special_token, list): + for special_tok in special_token: + output = output.replace(special_tok, "") + else: + output = output.replace(special_token, "") + output = output.strip() + + if conv.name == "xgen" and output.startswith("Assistant:"): + output = output.replace("Assistant:", "", 1).strip() + except RuntimeError as e: + print("ERROR question ID: ", question["question_id"]) + output = "ERROR" + + conv.update_last_message(output) + turns.append(output) + choices.append({"index": i, "turns": turns}) + + # Dump answers + os.makedirs(os.path.dirname(answer_file), exist_ok=True) + with open(os.path.expanduser(answer_file), "a") as fout: + ans_json = { + "question_id": question["question_id"], + "model_id": model_name, + "choices": choices, + "tstamp": time.time(), + } + fout.write(json.dumps(ans_json) + "\n") diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt b/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt new file mode 100644 index 000000000000..7a0f43b98698 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt @@ -0,0 +1,6 @@ +peft==0.6.2 +sentencepiece==0.2.0 +protobuf==5.27.1 +fschat[model_worker,webui]==0.2.35 +openai==0.28.0 +anthropic==0.18.1 diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/show_result.py b/benchmarks/flowertune-llm/evaluation/general-nlp/show_result.py new file mode 100644 index 000000000000..6a00c10bbdba --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/show_result.py @@ -0,0 +1,36 @@ +""" +This python file is adapted from https://github.com/lm-sys/FastChat/blob/main/fastchat/llm_judge/show_result.py + +FastChat (https://github.com/lm-sys/FastChat) is licensed under the Apache License, Version 2.0. + +Citation: +@misc{zheng2023judging, + title={Judging LLM-as-a-judge with MT-Bench and Chatbot Arena}, + author={Lianmin Zheng and Wei-Lin Chiang and Ying Sheng and Siyuan Zhuang and Zhanghao Wu + and Yonghao Zhuang and Zi Lin and Zhuohan Li and Dacheng Li and Eric. P Xing and Hao Zhang + and Joseph E. 
Gonzalez and Ion Stoica}, + year={2023}, + eprint={2306.05685}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""" + +import argparse + +from fastchat.llm_judge.show_result import display_result_single + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input-file", type=str, default=None) + parser.add_argument("--bench-name", type=str, default="mt_bench") + parser.add_argument("--judge-model", type=str, default="gpt-4") + parser.add_argument( + "--model-list", + type=str, + nargs="+", + default=None, + help="A list of models to be evaluated", + ) + args = parser.parse_args() + display_result_single(args) diff --git a/benchmarks/flowertune-llm/evaluation/medical/README.md b/benchmarks/flowertune-llm/evaluation/medical/README.md new file mode 100644 index 000000000000..628489ce8de6 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/README.md @@ -0,0 +1,41 @@ +# Evaluation for Medical challenge + +We build up a medical question answering (QA) pipeline to evaluate our fined-tuned LLMs. +Three datasets have been selected for this evaluation: [PubMedQA](https://huggingface.co/datasets/bigbio/pubmed_qa), [MedMCQA](https://huggingface.co/datasets/medmcqa), and [MedQA](https://huggingface.co/datasets/bigbio/med_qa). + + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/medical ./flowertune-eval-medical && rm -rf flower && cd flowertune-eval-medical +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. + +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=16 \ +--quantization=4 \ +--datasets=pubmedqa,medmcqa,medqa +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{run_name}.txt`, respectively. + + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (PubMedQA, MedMCQA, MedQA)** for three evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). diff --git a/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py b/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py new file mode 100644 index 000000000000..c72e2a7894da --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py @@ -0,0 +1,174 @@ +import json + +import pandas as pd +from sklearn.metrics import accuracy_score +from torch.utils.data import DataLoader +from tqdm import tqdm +from utils import format_answer, format_example, save_results + +import datasets + +# The instructions refer to Meditron evaluation: +# https://github.com/epfLLM/meditron/blob/main/evaluation/instructions.json +INSTRUCTIONS = { + "pubmedqa": "As an expert doctor in clinical science and medical knowledge, can you tell me if the following statement is correct? 
Answer yes, no, or maybe.", + "medqa": "You are a medical doctor taking the US Medical Licensing Examination. You need to demonstrate your understanding of basic and clinical science, medical knowledge, and mechanisms underlying health, disease, patient care, and modes of therapy. Show your ability to apply the knowledge essential for medical practice. For the following multiple-choice question, select one correct answer from A to E. Base your answer on the current and standard practices referenced in medical guidelines.", + "medmcqa": "You are a medical doctor answering realworld medical entrance exam questions. Based on your understanding of basic and clinical science, medical knowledge, and mechanisms underlying health, disease, patient care, and modes of therapy, answer the following multiple-choice question. Select one correct answer from A to D. Base your answer on the current and standard practices referenced in medical guidelines.", +} + + +def infer_pubmedqa(model, tokenizer, batch_size, run_name): + name = "pubmedqa" + answer_type = "boolean" + dataset = datasets.load_dataset( + "bigbio/pubmed_qa", + "pubmed_qa_labeled_fold0_source", + split="test", + trust_remote_code=True, + ) + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + context = "\n".join(row["CONTEXTS"]) + row["prompt"] = f"{context}\n{row['QUESTION']}" + row["gold"] = row["final_decision"] + row["long_answer"] = row["LONG_ANSWER"] + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def infer_medqa(model, tokenizer, batch_size, run_name): + name = "medqa" + answer_type = "mcq" + dataset = datasets.load_dataset( + "bigbio/med_qa", + "med_qa_en_4options_source", + split="test", + trust_remote_code=True, + ) + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + choices = [opt["value"] for opt in row["options"]] + row["prompt"] = format_example(row["question"], choices) + for opt in row["options"]: + if opt["value"] == row["answer"]: + row["gold"] = opt["key"] + break + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def infer_medmcqa(model, tokenizer, batch_size, run_name): + name = "medmcqa" + answer_type = "mcq" + dataset = datasets.load_dataset( + "medmcqa", split="validation", trust_remote_code=True + ) + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + options = [row["opa"], row["opb"], row["opc"], row["opd"]] + answer = int(row["cop"]) + row["prompt"] = format_example(row["question"], options) + row["gold"] = chr(ord("A") + answer) if answer in [0, 1, 2, 3] else None + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def generate_results( + name, run_name, dataset, model, tokenizer, batch_size, answer_type +): + # Run inference + prediction = inference(dataset, model, tokenizer, batch_size) + + # Calculate accuracy + acc = accuracy_compute(prediction, answer_type) + + # Save results and generations + save_results(name, run_name, prediction, acc) + + +def inference(dataset, 
model, tokenizer, batch_size): + columns_process = ["prompt", "gold"] + dataset_process = pd.DataFrame(dataset, columns=dataset.features)[columns_process] + dataset_process = dataset_process.assign(output="Null") + temperature = 1.0 + + inference_data = json.loads(dataset_process.to_json(orient="records")) + data_loader = DataLoader(inference_data, batch_size=batch_size, shuffle=False) + + batch_counter = 0 + for batch in tqdm(data_loader, total=len(data_loader), position=0, leave=True): + prompts = [ + f"<|im_start|>question\n{prompt}<|im_end|>\n<|im_start|>answer\n" + for prompt in batch["prompt"] + ] + if batch_counter == 0: + print(prompts[0]) + + # Process tokenizer + stop_seq = ["###"] + if tokenizer.eos_token is not None: + stop_seq.append(tokenizer.eos_token) + if tokenizer.pad_token is not None: + stop_seq.append(tokenizer.pad_token) + max_new_tokens = len( + tokenizer(batch["gold"][0], add_special_tokens=False)["input_ids"] + ) + + outputs = [] + for prompt in prompts: + input_ids = tokenizer.encode(prompt, return_tensors="pt").to("cuda") + output_ids = model.generate( + inputs=input_ids, + max_new_tokens=max_new_tokens, + do_sample=False, + top_p=1.0, + temperature=temperature, + pad_token_id=tokenizer.eos_token_id, + ) + output_ids = output_ids[0][len(input_ids[0]) :] + output = tokenizer.decode(output_ids, skip_special_tokens=True) + outputs.append(output) + + for prompt, out in zip(batch["prompt"], outputs): + dataset_process.loc[dataset_process["prompt"] == prompt, "output"] = out + batch_counter += 1 + + return dataset_process + + +def accuracy_compute(dataset, answer_type): + dataset = json.loads(dataset.to_json(orient="records")) + preds, golds = [], [] + for row in dataset: + answer = row["gold"].lower() + output = row["output"].lower() + pred, gold = format_answer(output, answer, answer_type=answer_type) + preds.append(pred) + golds.append(gold) + + accuracy = accuracy_score(preds, golds) + + return accuracy diff --git a/benchmarks/flowertune-llm/evaluation/medical/eval.py b/benchmarks/flowertune-llm/evaluation/medical/eval.py new file mode 100644 index 000000000000..7405e1493e4d --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/eval.py @@ -0,0 +1,62 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import infer_medmcqa, infer_medqa, infer_pubmedqa + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument( + "--datasets", + type=str, + default="pubmedqa", + help="The dataset to infer on: [pubmedqa, medqa, medmcqa]", +) +parser.add_argument("--batch-size", type=int, default=16) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. 
You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +# Evaluate +for dataset in args.datasets.split(","): + if dataset == "pubmedqa": + infer_pubmedqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "medqa": + infer_medqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "medmcqa": + infer_medmcqa(model, tokenizer, args.batch_size, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/medical/requirements.txt b/benchmarks/flowertune-llm/evaluation/medical/requirements.txt new file mode 100644 index 000000000000..adfc8b0c59db --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/requirements.txt @@ -0,0 +1,7 @@ +peft==0.6.2 +pandas==2.2.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 diff --git a/benchmarks/flowertune-llm/evaluation/medical/utils.py b/benchmarks/flowertune-llm/evaluation/medical/utils.py new file mode 100644 index 000000000000..44d0763d39d4 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/utils.py @@ -0,0 +1,81 @@ +import os +import re + + +def format_example(question, choices): + if not question.endswith("?") and not question.endswith("."): + question += "?" + options_str = "\n".join([f"{chr(65+i)}. {choices[i]}" for i in range(len(choices))]) + prompt = "Question: " + question + "\n\nOptions:\n" + options_str + return prompt + + +def save_results(dataset_name, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + results_path = os.path.join(path, f"acc_{dataset_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. 
") + + # Save generations + generation_path = os.path.join(path, f"generation_{dataset_name}_{run_name}.jsonl") + dataset.to_json(generation_path, orient="records") + + +def format_answer(output_full, answer, answer_type="mcq"): + output = output_full + default = (output_full, answer) + if "\n##" in output: + try: + output = output.split("\n##")[1].split("\n")[0].strip().lower() + except Exception: + return default + if "###" in answer: + try: + answer = answer.split("answer is:")[1].split("###")[0].strip() + except Exception: + return default + + output = re.sub(r"[^a-zA-Z0-9]", " ", output).strip() + output = re.sub(" +", " ", output) + + if answer_type == "boolean": + output = clean_boolean_answer(output) + elif answer_type == "mcq": + output = clean_mcq_answer(output) + + if output in ["a", "b", "c", "d", "e", "yes", "no"]: + return output, answer + else: + return default + + +def clean_mcq_answer(output): + output = clean_answer(output) + try: + output = output[0] + except Exception: + return output + return output + + +def clean_boolean_answer(output): + if "yesyes" in output: + output = output.replace("yesyes", "yes") + elif "nono" in output: + output = output.replace("nono", "no") + elif "yesno" in output: + output = output.replace("yesno", "yes") + elif "noyes" in output: + output = output.replace("noyes", "no") + output = clean_answer(output) + return output + + +def clean_answer(output): + output_clean = output.encode("ascii", "ignore").decode("ascii") + return output_clean diff --git a/datasets/flwr_datasets/common/telemetry.py b/datasets/flwr_datasets/common/telemetry.py index ca484fdda73f..4bf80b93467d 100644 --- a/datasets/flwr_datasets/common/telemetry.py +++ b/datasets/flwr_datasets/common/telemetry.py @@ -25,7 +25,7 @@ from concurrent.futures import Future, ThreadPoolExecutor from enum import Enum, auto from pathlib import Path -from typing import Any, Dict, List, Optional, Union, cast +from typing import Any, Optional, Union, cast from flwr_datasets.common.version import package_name, package_version @@ -114,7 +114,7 @@ class EventType(str, Enum): # The type signature is not compatible with mypy, pylint and flake8 # so each of those needs to be disabled for this line. # pylint: disable-next=no-self-argument,arguments-differ,line-too-long - def _generate_next_value_(name: str, start: int, count: int, last_values: List[Any]) -> Any: # type: ignore # noqa: E501 + def _generate_next_value_(name: str, start: int, count: int, last_values: list[Any]) -> Any: # type: ignore # noqa: E501 return name PING = auto() @@ -127,7 +127,7 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A # Use the ThreadPoolExecutor with max_workers=1 to have a queue # and also ensure that telemetry calls are not blocking. 
-state: Dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { +state: dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { # Will be assigned ThreadPoolExecutor(max_workers=1) # in event() the first time it's required "executor": None, @@ -143,7 +143,7 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A # pylint: disable-next=unsubscriptable-object def event( event_type: EventType, - event_details: Optional[Dict[str, Any]] = None, + event_details: Optional[dict[str, Any]] = None, ) -> Future: # type: ignore """Submit create_event to ThreadPoolExecutor to avoid blocking.""" if state["executor"] is None: @@ -155,7 +155,7 @@ def event( return result -def create_event(event_type: EventType, event_details: Optional[Dict[str, Any]]) -> str: +def create_event(event_type: EventType, event_details: Optional[dict[str, Any]]) -> str: """Create telemetry event.""" if state["source"] is None: state["source"] = _get_source_id() diff --git a/datasets/flwr_datasets/common/typing.py b/datasets/flwr_datasets/common/typing.py index ffaefaeec313..d6d37b468494 100644 --- a/datasets/flwr_datasets/common/typing.py +++ b/datasets/flwr_datasets/common/typing.py @@ -15,7 +15,7 @@ """Flower Datasets type definitions.""" -from typing import Any, List +from typing import Any import numpy as np import numpy.typing as npt @@ -23,4 +23,4 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] NDArrayFloat = npt.NDArray[np.float_] -NDArrays = List[NDArray] +NDArrays = list[NDArray] diff --git a/datasets/flwr_datasets/common/version.py b/datasets/flwr_datasets/common/version.py index 48c3fc5aaa9c..3e4c9a31fd6c 100644 --- a/datasets/flwr_datasets/common/version.py +++ b/datasets/flwr_datasets/common/version.py @@ -19,15 +19,14 @@ import importlib.metadata as importlib_metadata -from typing import Tuple -def _check_package(name: str) -> Tuple[str, str]: +def _check_package(name: str) -> tuple[str, str]: version: str = importlib_metadata.version(name) return name, version -def _version() -> Tuple[str, str]: +def _version() -> tuple[str, str]: """Read and return Flower Dataset package name and version. Returns diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index e913b9095d17..72ea54773564 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -161,6 +161,11 @@ def load_partition( not need to provide this argument, but if `partitioners={"train": 10, "test": 100}`, you need to set it to differentiate which partitioner should be used. + The split names you can choose from vary from dataset to dataset. You need + to check the dataset on the `Hugging Face Hub`_ to see which splits are available. You can resplit the dataset + by using the `preprocessor` parameter (to rename, merge, divide, etc. the + available splits). Returns ------- @@ -203,6 +208,11 @@ def load_split(self, split: str) -> Dataset: ---------- split : str Split name of the downloaded dataset (e.g. "train", "test"). + The split names you can choose from vary from dataset to dataset. You need + to check the dataset on the `Hugging Face Hub`_ to see which splits are available. You can resplit the dataset + by using the `preprocessor` parameter (to rename, merge, divide, etc. the + available splits). 
Returns ------- @@ -307,7 +317,8 @@ def _prepare_dataset(self) -> None: raise ValueError( "Probably one of the specified parameter in `load_dataset_kwargs` " "change the return type of the datasets.load_dataset function. " - "Make sure to use parameter such that the return type is DatasetDict." + "Make sure to use parameter such that the return type is DatasetDict. " + f"The return type is currently: {type(self._dataset)}." ) if self._shuffle: # Note it shuffles all the splits. The self._dataset is DatasetDict diff --git a/datasets/flwr_datasets/federated_dataset_test.py b/datasets/flwr_datasets/federated_dataset_test.py index 64d75a7a7a5a..bbdfa42292c2 100644 --- a/datasets/flwr_datasets/federated_dataset_test.py +++ b/datasets/flwr_datasets/federated_dataset_test.py @@ -17,7 +17,7 @@ import unittest -from typing import Dict, Union +from typing import Union from unittest.mock import Mock, patch import numpy as np @@ -35,12 +35,30 @@ mocked_datasets = ["cifar100", "svhn", "sentiment140", "speech_commands"] +mocked_by_partial_download_datasets = [ + "flwrlabs/pacs", + "flwrlabs/cinic10", + "flwrlabs/caltech101", + "flwrlabs/office-home", + "flwrlabs/fed-isic2019", +] + +natural_id_datasets = [ + "flwrlabs/femnist", +] + +mocked_natural_id_datasets = [ + "flwrlabs/ucf101", + "flwrlabs/ambient-acoustic-context", + "LIUM/tedlium", +] + @parameterized_class( ("dataset_name", "test_split", "subset"), [ # Downloaded - # #Image datasets + # Image ("mnist", "test", ""), ("cifar10", "test", ""), ("fashion_mnist", "test", ""), @@ -52,8 +70,8 @@ ("scikit-learn/adult-census-income", None, ""), ("jlh/uci-mushrooms", None, ""), ("scikit-learn/iris", None, ""), - # Mocked - # #Image + # Mocked by local recreation + # Image ("cifar100", "test", ""), # Note: there's also the extra split and full_numbers subset ("svhn", "test", "cropped_digits"), @@ -61,6 +79,13 @@ ("sentiment140", "test", ""), # aka twitter # Audio ("speech_commands", "test", "v0.01"), + # Mocked by partial download + # Image + ("flwrlabs/pacs", None, ""), + ("flwrlabs/cinic10", "test", ""), + ("flwrlabs/caltech101", None, ""), + ("flwrlabs/office-home", None, ""), + ("flwrlabs/fed-isic2019", "test", ""), ], ) class BaseFederatedDatasetsTest(unittest.TestCase): @@ -86,10 +111,29 @@ def setUp(self) -> None: self.mock_load_dataset.return_value = _load_mocked_dataset( self.dataset_name, [200, 100], ["train", self.test_split], self.subset ) + elif self.dataset_name in mocked_by_partial_download_datasets: + split_names = ["train"] + skip_take_lists = [[(0, 30), (1000, 30), (2000, 40)]] + # If the dataset has split test update the mocking to include it + if self.test_split is not None: + split_names.append(self.test_split) + skip_take_lists.append([(0, 30), (100, 30), (200, 40)]) + mock_return_value = _load_mocked_dataset_dict_by_partial_download( + dataset_name=self.dataset_name, + split_names=split_names, + skip_take_lists=skip_take_lists, + subset_name=None if self.subset == "" else self.subset, + ) + self.patcher = patch("datasets.load_dataset") + self.mock_load_dataset = self.patcher.start() + self.mock_load_dataset.return_value = mock_return_value def tearDown(self) -> None: """Clean up after the dataset mocking.""" - if self.dataset_name in mocked_datasets: + if ( + self.dataset_name in mocked_datasets + or self.dataset_name in mocked_by_partial_download_datasets + ): patch.stopall() @parameterized.expand( # type: ignore @@ -341,7 +385,7 @@ def test_dict_of_partitioners_passes_partitioners(self) -> None: """Test if partitioners are passed 
directly (no recreation).""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": IidPartitioner(num_partitions=num_test_partitions), } @@ -375,7 +419,7 @@ def test_mixed_type_partitioners_passes_instantiated_partitioners(self) -> None: """Test if an instantiated partitioner is passed directly.""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": num_test_partitions, } @@ -389,7 +433,7 @@ def test_mixed_type_partitioners_creates_from_int(self) -> None: """Test if an IidPartitioner partitioner is created.""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": num_test_partitions, } @@ -403,17 +447,6 @@ def test_mixed_type_partitioners_creates_from_int(self) -> None: ) -natural_id_datasets = [ - "flwrlabs/femnist", -] - -mocked_natural_id_datasets = [ - "flwrlabs/ucf101", - "flwrlabs/ambient-acoustic-context", - "LIUM/tedlium", -] - - @parameterized_class( ("dataset_name", "test_split", "subset", "partition_by"), [ diff --git a/datasets/flwr_datasets/metrics/utils.py b/datasets/flwr_datasets/metrics/utils.py index 8f78b2fd4c32..14e1f8d68110 100644 --- a/datasets/flwr_datasets/metrics/utils.py +++ b/datasets/flwr_datasets/metrics/utils.py @@ -16,7 +16,7 @@ import warnings -from typing import List, Optional, Union +from typing import Optional, Union import pandas as pd @@ -206,7 +206,7 @@ def compute_frequencies( def _compute_counts( - labels: Union[List[int], List[str]], unique_labels: Union[List[int], List[str]] + labels: Union[list[int], list[str]], unique_labels: Union[list[int], list[str]] ) -> pd.Series: """Compute the count of labels when taking into account all possible labels. @@ -237,7 +237,7 @@ def _compute_counts( def _compute_frequencies( - labels: Union[List[int], List[str]], unique_labels: Union[List[int], List[str]] + labels: Union[list[int], list[str]], unique_labels: Union[list[int], list[str]] ) -> pd.Series: """Compute the distribution of labels when taking into account all possible labels. diff --git a/datasets/flwr_datasets/mock_utils_test.py b/datasets/flwr_datasets/mock_utils_test.py index 3324ad5e7f51..0976166648eb 100644 --- a/datasets/flwr_datasets/mock_utils_test.py +++ b/datasets/flwr_datasets/mock_utils_test.py @@ -19,7 +19,7 @@ import random import string from datetime import datetime, timedelta -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Optional, Union import numpy as np from PIL import Image @@ -30,7 +30,7 @@ def _generate_artificial_strings( num_rows: int, num_unique: int, string_length: int, seed: int = 42 -) -> List[str]: +) -> list[str]: """Create list of strings for categories or labels mocking. Note to keep the seed the same if you reuse this function for in creation of the @@ -53,7 +53,7 @@ def _generate_artificial_strings( List of generated strings. 
""" random.seed(seed) - unique_strings: Set[str] = set() + unique_strings: set[str] = set() while len(unique_strings) < num_unique: random_str = "".join( random.choices(string.ascii_letters + string.digits, k=string_length) @@ -68,7 +68,7 @@ def _generate_artificial_strings( return artificial_column -def _generate_artificial_categories(num_rows: int, choices: List[Any]) -> List[str]: +def _generate_artificial_categories(num_rows: int, choices: list[Any]) -> list[str]: """Create list of strings from given `choices` list.""" artificial_column = choices.copy() remaining_to_allocate = num_rows - len(choices) @@ -82,7 +82,7 @@ def _generate_random_word(length: int) -> str: return "".join(random.choices(string.ascii_letters, k=length)) -def _generate_random_text_column(num_rows: int, length: int) -> List[str]: +def _generate_random_text_column(num_rows: int, length: int) -> list[str]: """Generate a list of random text of specified length.""" text_col = [] for _ in range(num_rows): @@ -98,7 +98,7 @@ def _generate_random_sentence( ) -> str: """Generate a random sentence with words of random lengths.""" sentence_length = random.randint(min_sentence_length, max_sentence_length) - sentence: List[str] = [] + sentence: list[str] = [] while len(" ".join(sentence)) < sentence_length: word_length = random.randint(min_word_length, max_word_length) word = _generate_random_word(word_length) @@ -112,7 +112,7 @@ def _generate_random_sentences( max_word_length: int, min_sentence_length: int, max_sentence_length: int, -) -> List[str]: +) -> list[str]: """Generate a list of random sentences.""" text_col = [ _generate_random_sentence( @@ -123,7 +123,7 @@ def _generate_random_sentences( return text_col -def _make_num_rows_none(column: List[Any], num_none: int) -> List[Any]: +def _make_num_rows_none(column: list[Any], num_none: int) -> list[Any]: """Assign none num_none times to the given list.""" column_copy = column.copy() none_positions = random.sample(range(len(column_copy)), num_none) @@ -154,7 +154,7 @@ def _generate_random_date_column( end_date: datetime, date_format: str = "%a %b %d %H:%M:%S %Y", as_string: bool = True, -) -> List[Union[str, datetime]]: +) -> list[Union[str, datetime]]: """Generate a list of random dates.""" return [ _generate_random_date(start_date, end_date, date_format, as_string) @@ -162,21 +162,21 @@ def _generate_random_date_column( ] -def _generate_random_int_column(num_rows: int, min_int: int, max_int: int) -> List[int]: +def _generate_random_int_column(num_rows: int, min_int: int, max_int: int) -> list[int]: """Generate a list of ints.""" return [random.randint(min_int, max_int) for _ in range(num_rows)] -def _generate_random_bool_column(num_rows: int) -> List[bool]: +def _generate_random_bool_column(num_rows: int) -> list[bool]: """Generate a list of bools.""" return [random.choice([True, False]) for _ in range(num_rows)] def _generate_random_image_column( num_rows: int, - image_size: Union[Tuple[int, int], Tuple[int, int, int]], + image_size: Union[tuple[int, int], tuple[int, int, int]], simulate_type: str, -) -> List[Any]: +) -> list[Any]: """Simulate the images with the format that is found in HF Hub. Directly using `Image.fromarray` does not work because it creates `PIL.Image.Image`. @@ -207,7 +207,7 @@ def generate_random_audio_column( num_rows: int, sampling_rate: int, length_in_samples: int, -) -> List[Dict[str, Any]]: +) -> list[dict[str, Any]]: """Simulate the audio column. 
Audio column in the datset is comprised from an array or floats, sample_rate and a @@ -365,8 +365,8 @@ def _mock_speach_commands(num_rows: int) -> Dataset: def _load_mocked_dataset( dataset_name: str, - num_rows: List[int], - split_names: List[str], + num_rows: list[int], + split_names: list[str], subset: str = "", ) -> DatasetDict: dataset_dict = {} @@ -380,7 +380,7 @@ def _load_mocked_dataset( def _load_mocked_dataset_by_partial_download( dataset_name: str, split_name: str, - skip_take_list: List[Tuple[int, int]], + skip_take_list: list[tuple[int, int]], subset_name: Optional[str] = None, ) -> Dataset: """Download a partial dataset. @@ -423,11 +423,14 @@ def _load_mocked_dataset_by_partial_download( def _load_mocked_dataset_dict_by_partial_download( dataset_name: str, - split_names: List[str], - skip_take_lists: List[List[Tuple[int, int]]], + split_names: list[str], + skip_take_lists: list[list[tuple[int, int]]], subset_name: Optional[str] = None, ) -> DatasetDict: """Like _load_mocked_dataset_by_partial_download but for many splits.""" + assert len(split_names) == len( + skip_take_lists + ), "The split_names should be thesame length as the skip_take_lists." dataset_dict = {} for split_name, skip_take_list in zip(split_names, skip_take_lists): dataset_dict[split_name] = _load_mocked_dataset_by_partial_download( diff --git a/datasets/flwr_datasets/partitioner/__init__.py b/datasets/flwr_datasets/partitioner/__init__.py index acb2e6e832f5..a14efa1cc905 100644 --- a/datasets/flwr_datasets/partitioner/__init__.py +++ b/datasets/flwr_datasets/partitioner/__init__.py @@ -19,6 +19,7 @@ from .distribution_partitioner import DistributionPartitioner from .exponential_partitioner import ExponentialPartitioner from .grouped_natural_id_partitioner import GroupedNaturalIdPartitioner +from .id_to_size_fnc_partitioner import IdToSizeFncPartitioner from .iid_partitioner import IidPartitioner from .inner_dirichlet_partitioner import InnerDirichletPartitioner from .linear_partitioner import LinearPartitioner @@ -34,6 +35,7 @@ "DistributionPartitioner", "ExponentialPartitioner", "GroupedNaturalIdPartitioner", + "IdToSizeFncPartitioner", "IidPartitioner", "InnerDirichletPartitioner", "LinearPartitioner", diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py index dce208419181..55c190087f7c 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py @@ -16,7 +16,7 @@ import warnings -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -89,7 +89,7 @@ def __init__( # pylint: disable=R0913 self, num_partitions: int, partition_by: str, - alpha: Union[int, float, List[float], NDArrayFloat], + alpha: Union[int, float, list[float], NDArrayFloat], min_partition_size: int = 10, self_balancing: bool = False, shuffle: bool = True, @@ -110,8 +110,8 @@ def __init__( # pylint: disable=R0913 # Utility attributes # The attributes below are determined during the first call to load_partition self._avg_num_of_samples_per_partition: Optional[float] = None - self._unique_classes: Optional[Union[List[int], List[str]]] = None - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._unique_classes: Optional[Union[list[int], list[str]]] = None + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: 
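Side note on the `alpha` annotation in the `__init__` above: a minimal usage sketch, not part of the diff, assuming `DirichletPartitioner` is exported from `flwr_datasets.partitioner` and that the dataset carries a "label" column. It shows the scalar and the per-partition forms that `Union[int, float, list[float], NDArrayFloat]` admits.

    from flwr_datasets.partitioner import DirichletPartitioner

    # Scalar alpha: the same concentration is repeated for every partition.
    uniform_alpha = DirichletPartitioner(
        num_partitions=10, partition_by="label", alpha=0.5
    )

    # Per-partition alpha: the list length must equal num_partitions.
    per_partition_alpha = DirichletPartitioner(
        num_partitions=3, partition_by="label", alpha=[0.1, 0.5, 1.0]
    )
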
@@ -142,7 +142,7 @@ def num_partitions(self) -> int: return self._num_partitions def _initialize_alpha( - self, alpha: Union[int, float, List[float], NDArrayFloat] + self, alpha: Union[int, float, list[float], NDArrayFloat] ) -> NDArrayFloat: """Convert alpha to the used format in the code a NDArrayFloat. @@ -164,7 +164,7 @@ def _initialize_alpha( alpha = np.array([float(alpha)], dtype=float).repeat(self._num_partitions) elif isinstance(alpha, float): alpha = np.array([alpha], dtype=float).repeat(self._num_partitions) - elif isinstance(alpha, List): + elif isinstance(alpha, list): if len(alpha) != self._num_partitions: raise ValueError( "If passing alpha as a List, it needs to be of length of equal to " @@ -217,7 +217,7 @@ def _determine_partition_id_to_indices_if_needed( sampling_try = 0 while True: # Prepare data structure to store indices assigned to partition ids - partition_id_to_indices: Dict[int, List[int]] = {} + partition_id_to_indices: dict[int, list[int]] = {} for nid in range(self._num_partitions): partition_id_to_indices[nid] = [] diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py index b2407b5d5822..ed38e8ee2a41 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py @@ -17,7 +17,7 @@ # pylint: disable=W0212 import unittest -from typing import Tuple, Union +from typing import Union import numpy as np from numpy.typing import NDArray @@ -33,7 +33,7 @@ def _dummy_setup( num_rows: int, partition_by: str, self_balancing: bool = True, -) -> Tuple[Dataset, DirichletPartitioner]: +) -> tuple[Dataset, DirichletPartitioner]: """Create a dummy dataset and partitioner for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git a/datasets/flwr_datasets/partitioner/distribution_partitioner.py b/datasets/flwr_datasets/partitioner/distribution_partitioner.py index e9acc41c707e..86be62b36070 100644 --- a/datasets/flwr_datasets/partitioner/distribution_partitioner.py +++ b/datasets/flwr_datasets/partitioner/distribution_partitioner.py @@ -16,7 +16,7 @@ from collections import Counter -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -182,7 +182,7 @@ def __init__( # pylint: disable=R0913 self._num_unique_labels: int = 0 self._num_columns: int = 0 self._partition_id_to_indices_determined = False - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} def load_partition(self, partition_id: int) -> datasets.Dataset: """Load a partition based on the partition index. 
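The `Dict`/`List`/`Tuple`/`Set` to `dict`/`list`/`tuple`/`set` rewrites repeated throughout these files rely on PEP 585 built-in generics, which require Python 3.9 or newer. A minimal before/after sketch with a hypothetical helper (not taken from the codebase):

    from typing import Dict, List  # only needed for the "before" form

    # Before: typing aliases.
    def label_counts_old(labels: List[str]) -> Dict[str, int]:
        return {label: labels.count(label) for label in set(labels)}

    # After: built-in generics (Python >= 3.9).
    def label_counts_new(labels: list[str]) -> dict[str, int]:
        return {label: labels.count(label) for label in set(labels)}
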
diff --git a/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py b/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py index bfeafd355be6..306e208a706b 100644 --- a/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py @@ -17,7 +17,7 @@ import unittest from collections import Counter -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Union import numpy as np from parameterized import parameterized_class @@ -62,7 +62,7 @@ def _get_partitioner( num_unique_labels: int, preassigned_num_samples_per_label: int, rescale_mode: bool = True, -) -> Tuple[DistributionPartitioner, Dict[int, Dataset]]: +) -> tuple[DistributionPartitioner, dict[int, Dataset]]: """Create DistributionPartitioner instance.""" dataset = _dummy_dataset_setup( num_samples, @@ -83,7 +83,7 @@ def _get_partitioner( rescale=rescale_mode, ) partitioner.dataset = dataset - partitions: Dict[int, Dataset] = { + partitions: dict[int, Dataset] = { pid: partitioner.load_partition(pid) for pid in range(num_partitions) } @@ -135,7 +135,7 @@ def test_correct_num_times_classes_sampled_across_partitions(self) -> None: preassigned_num_samples_per_label=self.preassigned_num_samples_per_label, ) - partitioned_distribution: Dict[Any, List[Any]] = { + partitioned_distribution: dict[Any, list[Any]] = { label: [] for label in partitioner.dataset.unique("labels") } @@ -162,7 +162,7 @@ def test_exact_distribution_assignment(self) -> None: preassigned_num_samples_per_label=self.preassigned_num_samples_per_label, rescale_mode=False, ) - partitioned_distribution: Dict[Any, List[Any]] = { + partitioned_distribution: dict[Any, list[Any]] = { label: [] for label in partitioner.dataset.unique("labels") } diff --git a/datasets/flwr_datasets/partitioner/exponential_partitioner.py b/datasets/flwr_datasets/partitioner/exponential_partitioner.py index 5d9f34352af1..1bf838df5909 100644 --- a/datasets/flwr_datasets/partitioner/exponential_partitioner.py +++ b/datasets/flwr_datasets/partitioner/exponential_partitioner.py @@ -17,10 +17,10 @@ import numpy as np -from flwr_datasets.partitioner.size_partitioner import SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class ExponentialPartitioner(SizePartitioner): +class ExponentialPartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are correlated with exp(id). The amount of data each client gets is correlated with the exponent of partition ID. 
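Because `IdToSizeFncPartitioner` is now exported from `flwr_datasets.partitioner` (see the `__init__.py` change above), it can also be used directly with a custom size function. A short sketch based on the behaviour visible in this diff: the function receives a NumPy array of the 1-based partition ids, and its outputs are normalized into per-partition fractions of the dataset. `ExponentialPartitioner` and `LinearPartitioner` appear to be this class specialized with exponential and linear size functions, respectively.

    import numpy as np

    from flwr_datasets.partitioner import IdToSizeFncPartitioner

    # Partition sizes proportional to the square root of the (1-based) partition id.
    partitioner = IdToSizeFncPartitioner(
        num_partitions=10,
        partition_id_to_size_fn=np.sqrt,
    )
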
diff --git a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py index f10d80b3aaac..4ce4f3717190 100644 --- a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py +++ b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py @@ -15,7 +15,7 @@ """Grouped natural id partitioner class that works with Hugging Face Datasets.""" -from typing import Any, Dict, List, Literal +from typing import Any, Literal import numpy as np @@ -72,9 +72,9 @@ def __init__( sort_unique_ids: bool = False, ) -> None: super().__init__() - self._partition_id_to_natural_ids: Dict[int, List[Any]] = {} - self._natural_id_to_partition_id: Dict[Any, int] = {} - self._partition_id_to_indices: Dict[int, NDArrayInt] = {} + self._partition_id_to_natural_ids: dict[int, list[Any]] = {} + self._natural_id_to_partition_id: dict[Any, int] = {} + self._partition_id_to_indices: dict[int, NDArrayInt] = {} self._partition_by = partition_by self._mode = mode self._sort_unique_ids = sort_unique_ids @@ -211,7 +211,7 @@ def num_partitions(self) -> int: return len(self._partition_id_to_natural_ids) @property - def partition_id_to_natural_ids(self) -> Dict[int, List[Any]]: + def partition_id_to_natural_ids(self) -> dict[int, list[Any]]: """Partition id to the corresponding group of natural ids present. Natural ids are the unique values in `partition_by` column in dataset. @@ -219,6 +219,6 @@ def partition_id_to_natural_ids(self) -> Dict[int, List[Any]]: return self._partition_id_to_natural_ids @property - def natural_id_to_partition_id(self) -> Dict[Any, int]: + def natural_id_to_partition_id(self) -> dict[Any, int]: """Natural id to the corresponding partition id.""" return self._natural_id_to_partition_id diff --git a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py index 635d3850624d..014d18c1dc15 100644 --- a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py @@ -16,7 +16,7 @@ import unittest -from typing import List, Literal, Set +from typing import Literal from parameterized import parameterized, parameterized_class @@ -95,7 +95,7 @@ def test_allow_smaller_mode_num_partitions_and_partition_sizes( num_rows: int, num_unique_natural_id: int, group_size: int, - expected_num_unique_natural_ids: List[int], + expected_num_unique_natural_ids: list[int], ) -> None: """Test allow-smaller mode handles the remainder correctly.""" dataset = _create_dataset(num_rows, num_unique_natural_id) @@ -132,7 +132,7 @@ def test_allow_bigger_mode_num_partitions_and_partition_sizes( num_rows: int, num_unique_natural_id: int, group_size: int, - expected_num_unique_natural_ids: List[int], + expected_num_unique_natural_ids: list[int], ) -> None: """Test allow-bigger mode handles the remainder correctly.""" dataset = _create_dataset(num_rows, num_unique_natural_id) @@ -169,7 +169,7 @@ def test_drop_reminder_mode_num_partitions_and_partition_sizes( num_rows: int, num_unique_natural_id: int, group_size: int, - expected_num_unique_natural_ids: List[int], + expected_num_unique_natural_ids: list[int], ) -> None: """Test drop reminder mode.""" dataset = _create_dataset(num_rows, num_unique_natural_id) @@ -226,7 +226,7 @@ def test_no_overlapping_natural_ids( ] # Check for overlaps between partitions - seen_natural_ids: Set[str] = set() + seen_natural_ids: 
set[str] = set() for partition in partitions: natural_ids_in_partition = set(partition.unique("natural_id")) diff --git a/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py new file mode 100644 index 000000000000..bd6336eb0801 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py @@ -0,0 +1,145 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""IdToSizeFncPartitioner class.""" + + +from typing import Callable, Union + +import numpy as np + +import datasets +from flwr_datasets.partitioner.partitioner import Partitioner + + +class IdToSizeFncPartitioner(Partitioner): + """Base class for the deterministic size partitioning based on the `partition_id`. + + The client with `partition_id` has the following relationship regarding the number + of samples. + + `partition_id_to_size_fn(partition_id)` ~ number of samples for `partition_id` + + If the function doesn't transform the `partition_id` it's a linear correlation + between the number of sample for the partition and the value of `partition_id`. For + instance, if the partition ids range from 1 to M, partition with id 1 gets 1 unit of + data, client 2 gets 2 units, and so on, up to partition M which gets M units. + + Note that size corresponding to the `partition_id` is deterministic, yet in case of + different dataset shuffling the assignment of samples to `partition_id` will vary. + + Parameters + ---------- + num_partitions : int + The total number of partitions that the data will be divided into. + partition_id_to_size_fn : Callable + Function that defines the relationship between partition id and the number of + samples. + """ + + def __init__( + self, + num_partitions: int, + partition_id_to_size_fn: Callable, # type: ignore[type-arg] + ) -> None: + super().__init__() + if num_partitions <= 0: + raise ValueError("The number of partitions must be greater than zero.") + self._num_partitions = num_partitions + self._partition_id_to_size_fn = partition_id_to_size_fn + + self._partition_id_to_size: dict[int, int] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} + # A flag to perform only a single compute to determine the indices + self._partition_id_to_indices_determined = False + + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a single partition based on the partition index. + + The number of samples is dependent on the partition partition_id. + + Parameters + ---------- + partition_id : int + the index that corresponds to the requested partition + + Returns + ------- + dataset_partition: Dataset + single dataset partition + """ + # The partitioning is done lazily - only when the first partition is requested. + # A single run creates the indices assignments for all the partition indices. 
+ self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._determine_partition_id_to_indices_if_needed() + return self._num_partitions + + @property + def partition_id_to_size(self) -> dict[int, int]: + """Node id to the number of samples.""" + return self._partition_id_to_size + + @property + def partition_id_to_indices(self) -> dict[int, list[int]]: + """Node id to the list of indices.""" + return self._partition_id_to_indices + + def _determine_partition_id_to_size(self) -> None: + """Determine data quantity associated with partition indices.""" + data_division_in_units = self._partition_id_to_size_fn( + np.linspace(start=1, stop=self._num_partitions, num=self._num_partitions) + ) + total_units: Union[int, float] = data_division_in_units.sum() + # Normalize the units to get the fraction total dataset + partition_sizes_as_fraction = data_division_in_units / total_units + # Calculate the number of samples + partition_sizes_as_num_of_samples = np.array( + partition_sizes_as_fraction * len(self.dataset), dtype=np.int64 + ) + # Check if any sample is not allocated because of multiplication with fractions. + assigned_samples = np.sum(partition_sizes_as_num_of_samples) + left_unassigned_samples = len(self.dataset) - assigned_samples + # If there is any sample(s) left unassigned, assign it to the largest partition. + partition_sizes_as_num_of_samples[-1] += left_unassigned_samples + for idx, partition_size in enumerate(partition_sizes_as_num_of_samples): + self._partition_id_to_size[idx] = partition_size + + self._check_if_partition_id_to_size_possible() + + def _determine_partition_id_to_indices_if_needed(self) -> None: + """Create an assignment of indices to the partition indices..""" + if self._partition_id_to_indices_determined is True: + return + self._determine_partition_id_to_size() + total_samples_assigned = 0 + for idx, quantity in self._partition_id_to_size.items(): + self._partition_id_to_indices[idx] = list( + range(total_samples_assigned, total_samples_assigned + quantity) + ) + total_samples_assigned += quantity + self._partition_id_to_indices_determined = True + + def _check_if_partition_id_to_size_possible(self) -> None: + all_positive = all(value >= 1 for value in self.partition_id_to_size.values()) + if not all_positive: + raise ValueError( + f"The given specification of the parameter num_partitions" + f"={self._num_partitions} for the given dataset results " + f"in the partitions sizes that are not greater than 0." + ) diff --git a/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py new file mode 100644 index 000000000000..905aa8cc9303 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py @@ -0,0 +1,104 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""IdToSizeFuncitonPartitioner tests.""" + + +import unittest + +from parameterized import parameterized + +from datasets import Dataset +from flwr_datasets.partitioner.linear_partitioner import LinearPartitioner + + +def _dummy_dataset(num_rows: int) -> Dataset: + data = { + "features": list(range(num_rows)), + "labels": [i % 2 for i in range(num_rows)], + } + dataset = Dataset.from_dict(data) + return dataset + + +class TestLinearPartitioner(unittest.TestCase): + """Test LinearPartitioner.""" + + @parameterized.expand( # type: ignore + [ + (1, 100), + (10, 100), + (5, 55), # This will leave some undivided samples + ] + ) + def test_linear_distribution(self, num_partitions: int, num_rows: int) -> None: + """Test the linear distribution of samples.""" + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + # Run a single partition loading to trigger the division + _ = partitioner.load_partition(0) + total_samples = sum(partitioner.partition_id_to_size.values()) + self.assertEqual(total_samples, num_rows) + + # Testing if each partition is getting more than the previous one + last_count = 0 + for i in range(num_partitions): + current_count = partitioner.partition_id_to_size[i] + self.assertGreaterEqual(current_count, last_count) + last_count = current_count + + @parameterized.expand( # type: ignore + [ + (10, 100), + (5, 55), # This will leave some undivided samples + (7, 77), # This will leave some undivided samples + ] + ) + def test_undivided_samples(self, num_partitions: int, num_rows: int) -> None: + """Test the logic for distributing undivided samples.""" + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + # If there are any undivided samples, they should be added to the largest + # partition + last_partition_id = num_partitions - 1 + actual_samples_in_last_partition = len( + partitioner.load_partition(last_partition_id) + ) + expected_samples_in_last_partition = partitioner.partition_id_to_size[ + last_partition_id + ] + self.assertEqual( + expected_samples_in_last_partition, actual_samples_in_last_partition + ) + + def test_meaningless_params(self) -> None: + """Test if the params leading to partition size not greater than zero raises.""" + num_rows = 10 + num_partitions = 100 + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + with self.assertRaises(ValueError) as context: + partitioner.load_partition(1) + self.assertIn( + "The given specification of the parameter num_partitions=100 for the given " + "dataset results in the partitions sizes that are not greater than 0.", + str(context.exception), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py index 64c37c4e7127..cbdc67be7fa5 100644 --- a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py @@ -16,7 +16,6 @@ import unittest -from typing import Tuple from parameterized import parameterized @@ -24,7 +23,7 @@ from flwr_datasets.partitioner.iid_partitioner import IidPartitioner -def 
_dummy_setup(num_partitions: int, num_rows: int) -> Tuple[Dataset, IidPartitioner]: +def _dummy_setup(num_partitions: int, num_rows: int) -> tuple[Dataset, IidPartitioner]: """Create a dummy dataset and partitioner based on given arguments. The partitioner has automatically the dataset assigned to it. diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py index e3e46813dfc8..e62b8fdbb212 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py @@ -14,7 +14,7 @@ # ============================================================================== """InnerDirichlet partitioner.""" import warnings -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -68,9 +68,9 @@ class InnerDirichletPartitioner(Partitioner): # pylint: disable=R0902 def __init__( # pylint: disable=R0913 self, - partition_sizes: Union[List[int], NDArrayInt], + partition_sizes: Union[list[int], NDArrayInt], partition_by: str, - alpha: Union[int, float, List[float], NDArrayFloat], + alpha: Union[int, float, list[float], NDArrayFloat], shuffle: bool = True, seed: Optional[int] = 42, ) -> None: @@ -87,11 +87,11 @@ def __init__( # pylint: disable=R0913 self._initialized_alpha = False self._rng = np.random.default_rng(seed=self._seed) # NumPy random generator # The attributes below are determined during the first call to load_partition - self._unique_classes: Optional[Union[List[int], List[str]]] = None + self._unique_classes: Optional[Union[list[int], list[str]]] = None self._num_unique_classes: Optional[int] = None self._num_partitions = len(self._partition_sizes) - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -130,7 +130,7 @@ def num_partitions(self) -> int: return self._num_partitions def _initialize_alpha_if_needed( - self, alpha: Union[int, float, List[float], NDArrayFloat] + self, alpha: Union[int, float, list[float], NDArrayFloat] ) -> NDArrayFloat: """Convert alpha to the used format in the code a NDArrayFloat. 
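For context on the `partition_sizes` and `alpha` annotations modernized here, a small usage sketch, assuming `InnerDirichletPartitioner` is exported at the package level and the dataset has a "label" column; the sizes are illustrative only.

    from flwr_datasets.partitioner import InnerDirichletPartitioner

    # Explicit per-partition sizes plus a single Dirichlet concentration that is
    # repeated for every class; a list alpha would instead have to match the
    # number of unique classes.
    partitioner = InnerDirichletPartitioner(
        partition_sizes=[20_000, 20_000, 20_000],
        partition_by="label",
        alpha=0.5,
    )
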
@@ -159,7 +159,7 @@ def _initialize_alpha_if_needed( elif isinstance(alpha, float): assert self._num_unique_classes is not None alpha = np.array([alpha], dtype=float).repeat(self._num_unique_classes) - elif isinstance(alpha, List): + elif isinstance(alpha, list): if len(alpha) != self._num_unique_classes: raise ValueError( "When passing alpha as a List, its length needs needs to be " @@ -304,10 +304,10 @@ def _check_the_sum_of_partition_sizes(self) -> None: def _instantiate_partition_sizes( - partition_sizes: Union[List[int], NDArrayInt] + partition_sizes: Union[list[int], NDArrayInt] ) -> NDArrayInt: """Transform list to the ndarray of ints if needed.""" - if isinstance(partition_sizes, List): + if isinstance(partition_sizes, list): partition_sizes = np.asarray(partition_sizes) elif isinstance(partition_sizes, np.ndarray): pass diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py index 86dc8a5df532..8df09d01f916 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py @@ -15,7 +15,7 @@ """Test DirichletPartitioner.""" # pylint: disable=W0212 import unittest -from typing import List, Tuple, Union +from typing import Union from datasets import Dataset from flwr_datasets.common.typing import NDArrayFloat, NDArrayInt @@ -27,9 +27,9 @@ def _dummy_setup( num_rows: int, partition_by: str, - partition_sizes: Union[List[int], NDArrayInt], - alpha: Union[float, List[float], NDArrayFloat], -) -> Tuple[Dataset, InnerDirichletPartitioner]: + partition_sizes: Union[list[int], NDArrayInt], + alpha: Union[float, list[float], NDArrayFloat], +) -> tuple[Dataset, InnerDirichletPartitioner]: """Create a dummy dataset and partitioner for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git a/datasets/flwr_datasets/partitioner/linear_partitioner.py b/datasets/flwr_datasets/partitioner/linear_partitioner.py index 840307edcac6..07fea16df5e0 100644 --- a/datasets/flwr_datasets/partitioner/linear_partitioner.py +++ b/datasets/flwr_datasets/partitioner/linear_partitioner.py @@ -15,10 +15,10 @@ """LinearPartitioner class.""" -from flwr_datasets.partitioner.size_partitioner import SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class LinearPartitioner(SizePartitioner): +class LinearPartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are linearly correlated with id. The amount of data each client gets is linearly correlated with the partition ID. 
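A short usage sketch for the re-based `LinearPartitioner`; the `FederatedDataset` wiring mirrors the `SizePartitioner` docstring example later in this diff, and "mnist" is one of the datasets exercised in the tests above.

    from flwr_datasets import FederatedDataset
    from flwr_datasets.partitioner import LinearPartitioner

    # Partition k receives roughly (k + 1) units of data, so the last of ten
    # partitions ends up with about ten times as many samples as the first.
    partitioner = LinearPartitioner(num_partitions=10)
    fds = FederatedDataset(dataset="mnist", partitioners={"train": partitioner})
    smallest = fds.load_partition(0)
    largest = fds.load_partition(9)
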
diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py index 5a9af3271cb4..64b51855e1f4 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py @@ -15,8 +15,6 @@ """Natural id partitioner class that works with Hugging Face Datasets.""" -from typing import Dict - import numpy as np from tqdm import tqdm @@ -62,9 +60,9 @@ def __init__( partition_by: str, ): super().__init__() - self._partition_id_to_natural_id: Dict[int, str] = {} - self._natural_id_to_partition_id: Dict[str, int] = {} - self._partition_id_to_indices: Dict[int, NDArrayInt] = {} + self._partition_id_to_natural_id: dict[int, str] = {} + self._natural_id_to_partition_id: dict[str, int] = {} + self._partition_id_to_indices: dict[int, NDArrayInt] = {} self._partition_by = partition_by def _create_int_partition_id_to_natural_id(self) -> None: @@ -138,7 +136,7 @@ def num_partitions(self) -> int: return len(self._partition_id_to_natural_id) @property - def partition_id_to_natural_id(self) -> Dict[int, str]: + def partition_id_to_natural_id(self) -> dict[int, str]: """Node id to corresponding natural id present. Natural ids are the unique values in `partition_by` column in dataset. @@ -146,7 +144,7 @@ def partition_id_to_natural_id(self) -> Dict[int, str]: return self._partition_id_to_natural_id @partition_id_to_natural_id.setter - def partition_id_to_natural_id(self, value: Dict[int, str]) -> None: + def partition_id_to_natural_id(self, value: dict[int, str]) -> None: raise AttributeError( "Setting the partition_id_to_natural_id dictionary is not allowed." ) diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py index b74a044967ef..d3147985dca9 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py @@ -18,7 +18,6 @@ import itertools import math import unittest -from typing import Tuple from parameterized import parameterized @@ -28,7 +27,7 @@ def _dummy_setup( num_rows: int, n_unique_natural_ids: int -) -> Tuple[Dataset, NaturalIdPartitioner]: +) -> tuple[Dataset, NaturalIdPartitioner]: """Create a dummy dataset and partitioner based on given arguments. The partitioner has automatically the dataset assigned to it. 
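A usage sketch for `NaturalIdPartitioner`, whose typing is modernized just above. The column name "writer_id" is an assumption used for illustration; any column whose unique values identify the natural partitions works.

    from flwr_datasets import FederatedDataset
    from flwr_datasets.partitioner import NaturalIdPartitioner

    # One partition per unique value of the chosen column.
    partitioner = NaturalIdPartitioner(partition_by="writer_id")
    fds = FederatedDataset(
        dataset="flwrlabs/femnist", partitioners={"train": partitioner}
    )
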
diff --git a/datasets/flwr_datasets/partitioner/pathological_partitioner.py b/datasets/flwr_datasets/partitioner/pathological_partitioner.py index 1ee60d283044..350383f344e7 100644 --- a/datasets/flwr_datasets/partitioner/pathological_partitioner.py +++ b/datasets/flwr_datasets/partitioner/pathological_partitioner.py @@ -16,7 +16,7 @@ import warnings -from typing import Any, Dict, List, Literal, Optional +from typing import Any, Literal, Optional import numpy as np @@ -114,13 +114,13 @@ def __init__( self._rng = np.random.default_rng(seed=self._seed) # Utility attributes - self._partition_id_to_indices: Dict[int, List[int]] = {} - self._partition_id_to_unique_labels: Dict[int, List[Any]] = { + self._partition_id_to_indices: dict[int, list[int]] = {} + self._partition_id_to_unique_labels: dict[int, list[Any]] = { pid: [] for pid in range(self._num_partitions) } - self._unique_labels: List[Any] = [] + self._unique_labels: list[Any] = [] # Count in how many partitions the label is used - self._unique_label_to_times_used_counter: Dict[Any, int] = {} + self._unique_label_to_times_used_counter: dict[Any, int] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: diff --git a/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py b/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py index 151b7e14659c..18707a56bd98 100644 --- a/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py @@ -16,7 +16,6 @@ import unittest -from typing import Dict import numpy as np from parameterized import parameterized @@ -79,7 +78,7 @@ def test_correct_num_classes_when_partitioned( num_classes_per_partition=num_classes_per_partition, ) partitioner.dataset = dataset - partitions: Dict[int, Dataset] = { + partitions: dict[int, Dataset] = { pid: partitioner.load_partition(pid) for pid in range(num_partitions) } unique_classes_per_partition = { diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner.py b/datasets/flwr_datasets/partitioner/shard_partitioner.py index 11cffa515da0..3001df6dcb69 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner.py @@ -17,7 +17,7 @@ # pylint: disable=R0912, R0914 import math -from typing import Dict, List, Optional +from typing import Optional import numpy as np @@ -165,7 +165,7 @@ def __init__( # pylint: disable=R0913 # Utility attributes self._rng = np.random.default_rng(seed=self._seed) # NumPy random generator - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -299,7 +299,7 @@ def _determine_partition_id_to_indices_if_needed( nid_to_shard_indices = np.split( shard_indices_array, indices_on_which_to_split_shards )[:-1] - partition_id_to_indices: Dict[int, List[int]] = { + partition_id_to_indices: dict[int, list[int]] = { cid: [] for cid in range(self._num_partitions) } # Compute partition_id to sample indices based on the shard indices diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py index d6fa8b529595..be8edf9d2764 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py @@ 
-17,7 +17,7 @@ # pylint: disable=W0212, R0913 import unittest -from typing import Optional, Tuple +from typing import Optional from datasets import Dataset from flwr_datasets.partitioner.shard_partitioner import ShardPartitioner @@ -30,7 +30,7 @@ def _dummy_setup( num_shards_per_partition: Optional[int], shard_size: Optional[int], keep_incomplete_shard: bool = False, -) -> Tuple[Dataset, ShardPartitioner]: +) -> tuple[Dataset, ShardPartitioner]: """Create a dummy dataset for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git a/datasets/flwr_datasets/partitioner/size_partitioner.py b/datasets/flwr_datasets/partitioner/size_partitioner.py index 35937d8b9cc7..a79b6b7249f2 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,72 +15,56 @@ """SizePartitioner class.""" -from typing import Callable, Dict, List, Union - -import numpy as np +import warnings +from collections.abc import Sequence import datasets from flwr_datasets.partitioner.partitioner import Partitioner class SizePartitioner(Partitioner): - """Base class for the deterministic size partitioning based on the `partition_id`. - - The client with `partition_id` has the following relationship regarding the number - of samples. - - `partition_id_to_size_fn(partition_id)` ~ number of samples for `partition_id` - - If the function doesn't transform the `partition_id` it's a linear correlation - between the number of sample for the partition and the value of `partition_id`. For - instance, if the partition ids range from 1 to M, partition with id 1 gets 1 unit of - data, client 2 gets 2 units, and so on, up to partition M which gets M units. - - Note that size corresponding to the `partition_id` is deterministic, yet in case of - different dataset shuffling the assignment of samples to `partition_id` will vary. + """Partitioner that creates each partition with the size specified by a user. Parameters ---------- - num_partitions : int - The total number of partitions that the data will be divided into. - partition_id_to_size_fn : Callable - Function that defines the relationship between partition id and the number of - samples. + partition_sizes : Sequence[int] + The size of each partition. partition_id 0 will have partition_sizes[0] + samples, partition_id 1 will have partition_sizes[1] samples, etc. 
+ + Examples + -------- + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import SizePartitioner + >>> + >>> partition_sizes = [15_000, 5_000, 30_000] + >>> partitioner = SizePartitioner(partition_sizes) + >>> fds = FederatedDataset(dataset="cifar10", partitioners={"train": partitioner}) """ - def __init__( - self, - num_partitions: int, - partition_id_to_size_fn: Callable, # type: ignore[type-arg] - ) -> None: + def __init__(self, partition_sizes: Sequence[int]) -> None: super().__init__() - if num_partitions <= 0: - raise ValueError("The number of partitions must be greater than zero.") - self._num_partitions = num_partitions - self._partition_id_to_size_fn = partition_id_to_size_fn - - self._partition_id_to_size: Dict[int, int] = {} - self._partition_id_to_indices: Dict[int, List[int]] = {} - # A flag to perform only a single compute to determine the indices + self._pre_ds_validate_partition_sizes(partition_sizes) + self._partition_sizes = partition_sizes + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: - """Load a single partition based on the partition index. + """Load a single partition of the size of partition_sizes[partition_id]. - The number of samples is dependent on the partition partition_id. + For example if given partition_sizes=[20_000, 10_000, 30_000], + then partition_id=0 will return a partition of size 20_000, + partition_id=1 will return a partition of size 10_000, etc. Parameters ---------- partition_id : int - the index that corresponds to the requested partition + The index that corresponds to the requested partition. Returns ------- - dataset_partition: Dataset - single dataset partition + dataset_partition : Dataset + Single dataset partition. """ - # The partitioning is done lazily - only when the first partition is requested. - # A single run creates the indices assignments for all the partition indices. 
self._determine_partition_id_to_indices_if_needed() return self.dataset.select(self._partition_id_to_indices[partition_id]) @@ -88,58 +72,57 @@ def load_partition(self, partition_id: int) -> datasets.Dataset: def num_partitions(self) -> int: """Total number of partitions.""" self._determine_partition_id_to_indices_if_needed() - return self._num_partitions + return len(self._partition_sizes) @property - def partition_id_to_size(self) -> Dict[int, int]: - """Node id to the number of samples.""" - return self._partition_id_to_size - - @property - def partition_id_to_indices(self) -> Dict[int, List[int]]: - """Node id to the list of indices.""" + def partition_id_to_indices(self) -> dict[int, list[int]]: + """Partition id to indices (the result of partitioning).""" + self._determine_partition_id_to_indices_if_needed() return self._partition_id_to_indices - def _determine_partition_id_to_size(self) -> None: - """Determine data quantity associated with partition indices.""" - data_division_in_units = self._partition_id_to_size_fn( - np.linspace(start=1, stop=self._num_partitions, num=self._num_partitions) - ) - total_units: Union[int, float] = data_division_in_units.sum() - # Normalize the units to get the fraction total dataset - partition_sizes_as_fraction = data_division_in_units / total_units - # Calculate the number of samples - partition_sizes_as_num_of_samples = np.array( - partition_sizes_as_fraction * len(self.dataset), dtype=np.int64 - ) - # Check if any sample is not allocated because of multiplication with fractions. - assigned_samples = np.sum(partition_sizes_as_num_of_samples) - left_unassigned_samples = len(self.dataset) - assigned_samples - # If there is any sample(s) left unassigned, assign it to the largest partition. - partition_sizes_as_num_of_samples[-1] += left_unassigned_samples - for idx, partition_size in enumerate(partition_sizes_as_num_of_samples): - self._partition_id_to_size[idx] = partition_size - - self._check_if_partition_id_to_size_possible() - - def _determine_partition_id_to_indices_if_needed(self) -> None: - """Create an assignment of indices to the partition indices..""" - if self._partition_id_to_indices_determined is True: + def _determine_partition_id_to_indices_if_needed( + self, + ) -> None: + """Create an assignment of indices to the partition indices.""" + if self._partition_id_to_indices_determined: return - self._determine_partition_id_to_size() - total_samples_assigned = 0 - for idx, quantity in self._partition_id_to_size.items(): - self._partition_id_to_indices[idx] = list( - range(total_samples_assigned, total_samples_assigned + quantity) - ) - total_samples_assigned += quantity + self._post_ds_validate_partition_sizes() + start = 0 + end = 0 + for partition_id, partition_size in enumerate(self._partition_sizes): + end += partition_size + indices = list(range(start, end)) + self._partition_id_to_indices[partition_id] = indices + start = end self._partition_id_to_indices_determined = True - def _check_if_partition_id_to_size_possible(self) -> None: - all_positive = all(value >= 1 for value in self.partition_id_to_size.values()) - if not all_positive: + def _pre_ds_validate_partition_sizes(self, partition_sizes: Sequence[int]) -> None: + """Check if the partition sizes are valid (no information about the dataset).""" + if not isinstance(partition_sizes, Sequence): + raise ValueError("Partition sizes must be a sequence.") + if len(partition_sizes) == 0: + raise ValueError("Partition sizes must not be empty.") + if not all( + 
isinstance(partition_size, int) for partition_size in partition_sizes + ): + raise ValueError("All partition sizes must be integers.") + if not all(partition_size > 0 for partition_size in partition_sizes): + raise ValueError("All partition sizes must be greater than zero.") + + def _post_ds_validate_partition_sizes(self) -> None: + """Validate the partition sizes against the dataset size.""" + desired_partition_sizes = sum(self._partition_sizes) + dataset_size = len(self.dataset) + if desired_partition_sizes > dataset_size: raise ValueError( - f"The given specification of the parameter num_partitions" - f"={self._num_partitions} for the given dataset results " - f"in the partitions sizes that are not greater than 0." + f"The sum of partition sizes sum({self._partition_sizes})" + f"= {desired_partition_sizes} is greater than the size of" + f" the dataset {dataset_size}." + ) + if desired_partition_sizes < dataset_size: + warnings.warn( + f"The sum of partition sizes is {desired_partition_sizes}, which is" + f"smaller than the size of the dataset: {dataset_size}. " + f"Ignore this warning if it is the desired behavior.", + stacklevel=1, ) diff --git a/datasets/flwr_datasets/partitioner/size_partitioner_test.py b/datasets/flwr_datasets/partitioner/size_partitioner_test.py index 086ca3731e58..be8edf9d2764 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner_test.py @@ -12,92 +12,380 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""SizePartitioner tests.""" +"""Test ShardPartitioner.""" +# pylint: disable=W0212, R0913 import unittest - -from parameterized import parameterized +from typing import Optional from datasets import Dataset -from flwr_datasets.partitioner.linear_partitioner import LinearPartitioner +from flwr_datasets.partitioner.shard_partitioner import ShardPartitioner -def _dummy_dataset(num_rows: int) -> Dataset: +def _dummy_setup( + num_rows: int, + partition_by: str, + num_partitions: int, + num_shards_per_partition: Optional[int], + shard_size: Optional[int], + keep_incomplete_shard: bool = False, +) -> tuple[Dataset, ShardPartitioner]: + """Create a dummy dataset for testing.""" data = { + partition_by: [i % 3 for i in range(num_rows)], "features": list(range(num_rows)), - "labels": [i % 2 for i in range(num_rows)], } dataset = Dataset.from_dict(data) - return dataset + partitioner = ShardPartitioner( + num_partitions=num_partitions, + num_shards_per_partition=num_shards_per_partition, + partition_by=partition_by, + shard_size=shard_size, + keep_incomplete_shard=keep_incomplete_shard, + ) + partitioner.dataset = dataset + return dataset, partitioner + + +class TestShardPartitionerSpec1(unittest.TestCase): + """Test first possible initialization of ShardPartitioner. + + Specify num_shards_per_partition and shard_size arguments. 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [30, 30, 30]) -class TestLinearPartitioner(unittest.TestCase): - """Test LinearPartitioner.""" + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. - @parameterized.expand( # type: ignore - [ - (1, 100), - (10, 100), - (5, 55), # This will leave some undivided samples + (No duplicates along partitions). + """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - ) - def test_linear_distribution(self, num_partitions: int, num_rows: int) -> None: - """Test the linear distribution of samples.""" - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - # Run a single partition loading to trigger the division + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec2(unittest.TestCase): + """Test second possible initialization of ShardPartitioner. + + Specify shard_size and keep_incomplete_shard=False. This setting creates partitions + that might have various sizes (each shard is same size). 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) _ = partitioner.load_partition(0) - total_samples = sum(partitioner.partition_id_to_size.values()) - self.assertEqual(total_samples, num_rows) - - # Testing if each partition is getting more than the previous one - last_count = 0 - for i in range(num_partitions): - current_count = partitioner.partition_id_to_size[i] - self.assertGreaterEqual(current_count, last_count) - last_count = current_count - - @parameterized.expand( # type: ignore - [ - (10, 100), - (5, 55), # This will leave some undivided samples - (7, 77), # This will leave some undivided samples + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [30, 40, 40]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). + """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - ) - def test_undivided_samples(self, num_partitions: int, num_rows: int) -> None: - """Test the logic for distributing undivided samples.""" - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - # If there are any undivided samples, they should be added to the largest - # partition - last_partition_id = num_partitions - 1 - actual_samples_in_last_partition = len( - partitioner.load_partition(last_partition_id) + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec3(unittest.TestCase): + """Test third possible initialization of ShardPartitioner. + + Specify shard_size and keep_incomplete_shard=True. This setting creates partitions + that might have various sizes (each shard is same size). 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [33, 40, 40]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). + """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) - expected_samples_in_last_partition = partitioner.partition_id_to_size[ - last_partition_id + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - self.assertEqual( - expected_samples_in_last_partition, actual_samples_in_last_partition + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec4(unittest.TestCase): + """Test fourth possible initialization of ShardPartitioner. + + Specify num_shards_per_partition but not shard_size arguments. + """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [36, 36, 36]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). 
+        """
+        partition_by = "label"
+        num_rows = 113
+        num_partitions = 3
+        num_shards_per_partition = 3
+        shard_size = None
+        keep_incomplete_shard = False
+        _, partitioner = _dummy_setup(
+            num_rows,
+            partition_by,
+            num_partitions,
+            num_shards_per_partition,
+            shard_size,
+            keep_incomplete_shard,
+        )
+        partitions = [
+            partitioner.load_partition(i)["features"] for i in range(num_partitions)
+        ]
+        combined_list = [item for sublist in partitions for item in sublist]
+        combined_set = set(combined_list)
+        self.assertEqual(len(combined_list), len(combined_set))
+
+
+class TestShardPartitionerIncorrectSpec(unittest.TestCase):
+    """Test the incorrect specification cases.
 
-    def test_meaningless_params(self) -> None:
-        """Test if the params leading to partition size not greater than zero raises."""
+    The incorrect specification can be caused by a combination of num_partitions,
+    shard_size and num_shards_per_partition that is impossible to satisfy.
+    """
+
+    def test_incorrect_specification(self) -> None:
+        """Test that an impossible specification raises a ValueError."""
+        partition_by = "label"
         num_rows = 10
-        num_partitions = 100
-        dataset = _dummy_dataset(num_rows)
-        partitioner = LinearPartitioner(num_partitions=num_partitions)
-        partitioner.dataset = dataset
-        with self.assertRaises(ValueError) as context:
-            partitioner.load_partition(1)
-        self.assertIn(
-            "The given specification of the parameter num_partitions=100 for the given "
-            "dataset results in the partitions sizes that are not greater than 0.",
-            str(context.exception),
+        num_partitions = 3
+        num_shards_per_partition = 2
+        shard_size = 10
+        keep_incomplete_shard = False
+        _, partitioner = _dummy_setup(
+            num_rows,
+            partition_by,
+            num_partitions,
+            num_shards_per_partition,
+            shard_size,
+            keep_incomplete_shard,
+        )
+        with self.assertRaises(ValueError):
+            _ = partitioner.load_partition(0)
+
+    def test_too_big_shard_size(self) -> None:
+        """Test that a shard size that would leave an empty partition raises."""
+        partition_by = "label"
+        num_rows = 20
+        num_partitions = 3
+        num_shards_per_partition = None
+        shard_size = 10
+        keep_incomplete_shard = False
+        _, partitioner = _dummy_setup(
+            num_rows,
+            partition_by,
+            num_partitions,
+            num_shards_per_partition,
+            shard_size,
+            keep_incomplete_shard,
         )
+        with self.assertRaises(ValueError):
+            _ = partitioner.load_partition(2).num_rows
 
 
 if __name__ == "__main__":
diff --git a/datasets/flwr_datasets/partitioner/square_partitioner.py b/datasets/flwr_datasets/partitioner/square_partitioner.py
index 0fa0a0803a0e..d48af247e5cb 100644
--- a/datasets/flwr_datasets/partitioner/square_partitioner.py
+++ b/datasets/flwr_datasets/partitioner/square_partitioner.py
@@ -17,10 +17,10 @@
 
 import numpy as np
 
-from flwr_datasets.partitioner.size_partitioner import SizePartitioner
+from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner
 
 
-class SquarePartitioner(SizePartitioner):
+class SquarePartitioner(IdToSizeFncPartitioner):
     """Partitioner creates partitions of size that are correlated with squared id.
 
     The amount of data each client gets is correlated with the squared partition ID.
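The ``ShardPartitioner`` tests above double as a reference for how the partitioner is configured. A minimal standalone sketch, assuming only that ``flwr-datasets`` and ``datasets`` are installed, which mirrors the ``_dummy_setup`` helper and the ``TestShardPartitionerSpec2`` case (only ``shard_size`` is fixed and the incomplete shard is dropped):

    from datasets import Dataset
    from flwr_datasets.partitioner.shard_partitioner import ShardPartitioner

    # Toy dataset: 113 rows with a 3-class "label" column, as in the tests above.
    num_rows = 113
    dataset = Dataset.from_dict(
        {"label": [i % 3 for i in range(num_rows)], "features": list(range(num_rows))}
    )

    # Spec 2: fix shard_size only; partition sizes may differ (each shard has 10 rows).
    partitioner = ShardPartitioner(
        num_partitions=3,
        partition_by="label",
        num_shards_per_partition=None,
        shard_size=10,
        keep_incomplete_shard=False,
    )
    partitioner.dataset = dataset

    sizes = sorted(len(partitioner.load_partition(i)) for i in range(3))
    print(sizes)  # [30, 40, 40] -- the sizes asserted by TestShardPartitionerSpec2

The tests themselves remain the source of truth; the sketch only illustrates the behavior they assert.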
diff --git a/datasets/flwr_datasets/preprocessor/divider_test.py b/datasets/flwr_datasets/preprocessor/divider_test.py index ed282fbc18be..bb92d72c1c4a 100644 --- a/datasets/flwr_datasets/preprocessor/divider_test.py +++ b/datasets/flwr_datasets/preprocessor/divider_test.py @@ -15,7 +15,7 @@ """Divider tests.""" import unittest -from typing import Dict, Union +from typing import Union from parameterized import parameterized_class @@ -84,14 +84,14 @@ class TestDivider(unittest.TestCase): """Divider tests.""" divide_config: Union[ - Dict[str, float], - Dict[str, int], - Dict[str, Dict[str, float]], - Dict[str, Dict[str, int]], + dict[str, float], + dict[str, int], + dict[str, dict[str, float]], + dict[str, dict[str, int]], ] divide_split: str drop_remaining_splits: bool - split_name_to_size: Dict[str, int] + split_name_to_size: dict[str, int] def setUp(self) -> None: """Set up the dataset with 3 splits for tests.""" diff --git a/datasets/flwr_datasets/preprocessor/merger.py b/datasets/flwr_datasets/preprocessor/merger.py index 2b76dbbafe4b..e47993dd686e 100644 --- a/datasets/flwr_datasets/preprocessor/merger.py +++ b/datasets/flwr_datasets/preprocessor/merger.py @@ -18,7 +18,6 @@ import collections import warnings from functools import reduce -from typing import Dict, List, Tuple import datasets from datasets import Dataset, DatasetDict @@ -56,9 +55,9 @@ class Merger: def __init__( self, - merge_config: Dict[str, Tuple[str, ...]], + merge_config: dict[str, tuple[str, ...]], ) -> None: - self._merge_config: Dict[str, Tuple[str, ...]] = merge_config + self._merge_config: dict[str, tuple[str, ...]] = merge_config self._check_duplicate_merge_splits() def __call__(self, dataset: DatasetDict) -> DatasetDict: @@ -70,7 +69,7 @@ def resplit(self, dataset: DatasetDict) -> DatasetDict: """Resplit the dataset according to the `merge_config`.""" resplit_dataset = {} for divide_to, divided_from__list in self._merge_config.items(): - datasets_from_list: List[Dataset] = [] + datasets_from_list: list[Dataset] = [] for divide_from in divided_from__list: datasets_from_list.append(dataset[divide_from]) if len(datasets_from_list) > 1: diff --git a/datasets/flwr_datasets/preprocessor/merger_test.py b/datasets/flwr_datasets/preprocessor/merger_test.py index 137b0dd1a660..0dd534229eb0 100644 --- a/datasets/flwr_datasets/preprocessor/merger_test.py +++ b/datasets/flwr_datasets/preprocessor/merger_test.py @@ -16,7 +16,6 @@ import unittest -from typing import Dict, Tuple import pytest @@ -39,28 +38,28 @@ def setUp(self) -> None: def test_resplitting_train_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_train"]), 3) def test_resplitting_valid_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_valid": ("valid",)} + strategy: dict[str, tuple[str, ...]] = {"new_valid": ("valid",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_valid"]), 2) def test_resplitting_test_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_test": ("test",)} + strategy: dict[str, tuple[str, ...]] = {"new_test": ("test",)} merger = 
Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_test"]), 1) def test_resplitting_train_the_same(self) -> None: """Test if resplitting for just renaming keeps the dataset the same.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertTrue( @@ -69,7 +68,7 @@ def test_resplitting_train_the_same(self) -> None: def test_combined_train_valid_size(self) -> None: """Test if the resplitting that combines the datasets has correct size.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "train_valid_combined": ("train", "valid") } merger = Merger(strategy) @@ -78,7 +77,7 @@ def test_combined_train_valid_size(self) -> None: def test_resplitting_test_with_combined_strategy_size(self) -> None: """Test if the resplitting that combines the datasets has correct size.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "train_valid_combined": ("train", "valid"), "test": ("test",), } @@ -88,7 +87,7 @@ def test_resplitting_test_with_combined_strategy_size(self) -> None: def test_invalid_resplit_strategy_exception_message(self) -> None: """Test if the resplitting raises error when non-existing split is given.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "new_train": ("invalid_split",), "new_test": ("test",), } @@ -100,7 +99,7 @@ def test_invalid_resplit_strategy_exception_message(self) -> None: def test_nonexistent_split_in_strategy(self) -> None: """Test if the exception is raised when the nonexistent split name is given.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_split": ("nonexistent_split",)} + strategy: dict[str, tuple[str, ...]] = {"new_split": ("nonexistent_split",)} merger = Merger(strategy) with self.assertRaisesRegex( ValueError, "The given dataset key 'nonexistent_split' is not present" @@ -109,7 +108,7 @@ def test_nonexistent_split_in_strategy(self) -> None: def test_duplicate_merge_split_name(self) -> None: """Test that the new split names are not the same.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "new_train": ("train", "valid"), "test": ("train",), } @@ -119,7 +118,7 @@ def test_duplicate_merge_split_name(self) -> None: def test_empty_dataset_dict(self) -> None: """Test that the error is raised when the empty DatasetDict is given.""" empty_dataset = DatasetDict({}) - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) with self.assertRaisesRegex( ValueError, "The given dataset key 'train' is not present" diff --git a/datasets/flwr_datasets/utils.py b/datasets/flwr_datasets/utils.py index 32904ded2861..1657c2a0ebd3 100644 --- a/datasets/flwr_datasets/utils.py +++ b/datasets/flwr_datasets/utils.py @@ -16,7 +16,7 @@ import warnings -from typing import Dict, List, Optional, Tuple, Union, cast +from typing import Optional, Union, cast from datasets import Dataset, DatasetDict, concatenate_datasets from flwr_datasets.partitioner import IidPartitioner, Partitioner @@ -48,12 +48,17 @@ "Mike0307/MNIST-M", "flwrlabs/usps", "scikit-learn/iris", + "flwrlabs/pacs", + "flwrlabs/cinic10", + "flwrlabs/caltech101", + "flwrlabs/office-home", + "flwrlabs/fed-isic2019", ] def _instantiate_partitioners( - partitioners: Dict[str, 
Union[Partitioner, int]] -) -> Dict[str, Partitioner]: + partitioners: dict[str, Union[Partitioner, int]] +) -> dict[str, Partitioner]: """Transform the partitioners from the initial format to instantiated objects. Parameters @@ -66,8 +71,8 @@ def _instantiate_partitioners( partitioners : Dict[str, Partitioner] Partitioners specified as split to Partitioner object. """ - instantiated_partitioners: Dict[str, Partitioner] = {} - if isinstance(partitioners, Dict): + instantiated_partitioners: dict[str, Partitioner] = {} + if isinstance(partitioners, dict): for split, partitioner in partitioners.items(): if isinstance(partitioner, Partitioner): instantiated_partitioners[split] = partitioner @@ -90,10 +95,10 @@ def _instantiate_partitioners( def _instantiate_merger_if_needed( - merger: Optional[Union[Preprocessor, Dict[str, Tuple[str, ...]]]] + merger: Optional[Union[Preprocessor, dict[str, tuple[str, ...]]]] ) -> Optional[Preprocessor]: """Instantiate `Merger` if preprocessor is merge_config.""" - if merger and isinstance(merger, Dict): + if merger and isinstance(merger, dict): merger = Merger(merge_config=merger) return cast(Optional[Preprocessor], merger) @@ -108,8 +113,8 @@ def _check_if_dataset_tested(dataset: str) -> None: def divide_dataset( - dataset: Dataset, division: Union[List[float], Tuple[float, ...], Dict[str, float]] -) -> Union[List[Dataset], DatasetDict]: + dataset: Dataset, division: Union[list[float], tuple[float, ...], dict[str, float]] +) -> Union[list[Dataset], DatasetDict]: """Divide the dataset according to the `division`. The division support varying number of splits, which you can name. The splits are @@ -141,7 +146,8 @@ def divide_dataset( >>> division = [0.8, 0.2] >>> train, test = divide_dataset(dataset=partition, division=division) - Use `divide_dataset` with division specified as a dict. + Use `divide_dataset` with division specified as a dict + (this accomplishes the same goal as the example with a list above). 
>>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.utils import divide_dataset @@ -156,12 +162,12 @@ def divide_dataset( dataset_length = len(dataset) ranges = _create_division_indices_ranges(dataset_length, division) if isinstance(division, (list, tuple)): - split_partition: List[Dataset] = [] + split_partition: list[Dataset] = [] for single_range in ranges: split_partition.append(dataset.select(single_range)) return split_partition if isinstance(division, dict): - split_partition_dict: Dict[str, Dataset] = {} + split_partition_dict: dict[str, Dataset] = {} for split_name, single_range in zip(division.keys(), ranges): split_partition_dict[split_name] = dataset.select(single_range) return DatasetDict(split_partition_dict) @@ -173,8 +179,8 @@ def divide_dataset( def _create_division_indices_ranges( dataset_length: int, - division: Union[List[float], Tuple[float, ...], Dict[str, float]], -) -> List[range]: + division: Union[list[float], tuple[float, ...], dict[str, float]], +) -> list[range]: ranges = [] if isinstance(division, (list, tuple)): start_idx = 0 @@ -200,7 +206,7 @@ def _create_division_indices_ranges( def _check_division_config_types_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: if isinstance(division, (list, tuple)): if not all(isinstance(x, float) for x in division): @@ -219,7 +225,7 @@ def _check_division_config_types_correctness( def _check_division_config_values_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: if isinstance(division, (list, tuple)): if not all(0 < x <= 1 for x in division): @@ -257,7 +263,7 @@ def _check_division_config_values_correctness( def _check_division_config_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: _check_division_config_types_correctness(division) _check_division_config_values_correctness(division) @@ -265,14 +271,14 @@ def _check_division_config_correctness( def concatenate_divisions( partitioner: Partitioner, - partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]], + partition_division: Union[list[float], tuple[float, ...], dict[str, float]], division_id: Union[int, str], ) -> Dataset: - """Create a dataset by concatenation of all partitions in the same division. + """Create a dataset by concatenation of divisions from all partitions. The divisions are created based on the `partition_division` and accessed based - on the `division_id`. It can be used to create e.g. centralized dataset from - federated on-edge test sets. + on the `division_id`. This fuction can be used to create e.g. centralized dataset + from federated on-edge test sets. Parameters ---------- @@ -293,6 +299,35 @@ def concatenate_divisions( ------- concatenated_divisions : Dataset A dataset created as concatenation of the divisions from all partitions. + + Examples + -------- + Use `concatenate_divisions` with division specified as a list. + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.utils import concatenate_divisions + >>> + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + >>> concatenated_divisions = concatenate_divisions( + ... partitioner=fds.partitioners["train"], + ... partition_division=[0.8, 0.2], + ... division_id=1 + ... 
) + >>> print(concatenated_divisions) + + Use `concatenate_divisions` with division specified as a dict. + This accomplishes the same goal as the example with a list above. + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.utils import concatenate_divisions + >>> + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + >>> concatenated_divisions = concatenate_divisions( + ... partitioner=fds["train"], + ... partition_division={"train": 0.8, "test": 0.2}, + ... division_id="test" + ... ) + >>> print(concatenated_divisions) """ _check_division_config_correctness(partition_division) divisions = [] @@ -307,7 +342,7 @@ def concatenate_divisions( ) partition = divide_dataset(partition, partition_division) division = partition[division_id] - elif isinstance(partition_division, Dict): + elif isinstance(partition_division, dict): partition = divide_dataset(partition, partition_division) division = partition[division_id] else: diff --git a/datasets/flwr_datasets/utils_test.py b/datasets/flwr_datasets/utils_test.py index 4add9f88eeb5..3c94570471ac 100644 --- a/datasets/flwr_datasets/utils_test.py +++ b/datasets/flwr_datasets/utils_test.py @@ -14,7 +14,7 @@ # ============================================================================== """Utils tests.""" import unittest -from typing import Dict, List, Tuple, Union +from typing import Union from parameterized import parameterized_class @@ -62,8 +62,8 @@ class UtilsTests(unittest.TestCase): """Utils for tests.""" - partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]] - sizes: Tuple[int] + partition_division: Union[list[float], tuple[float, ...], dict[str, float]] + sizes: tuple[int] division_id: Union[int, str] expected_concatenation_size: int diff --git a/datasets/flwr_datasets/visualization/bar_plot.py b/datasets/flwr_datasets/visualization/bar_plot.py index 352c99a572f5..2b09fb189c7a 100644 --- a/datasets/flwr_datasets/visualization/bar_plot.py +++ b/datasets/flwr_datasets/visualization/bar_plot.py @@ -15,7 +15,7 @@ """Label distribution bar plotting.""" -from typing import Any, Dict, Optional, Tuple, Union +from typing import Any, Optional, Union import numpy as np import pandas as pd @@ -28,15 +28,15 @@ def _plot_bar( dataframe: pd.DataFrame, axis: Optional[Axes], - figsize: Optional[Tuple[float, float]], + figsize: Optional[tuple[float, float]], title: str, colormap: Optional[Union[str, mcolors.Colormap]], partition_id_axis: str, size_unit: str, legend: bool, legend_title: Optional[str], - plot_kwargs: Optional[Dict[str, Any]], - legend_kwargs: Optional[Dict[str, Any]], + plot_kwargs: Optional[dict[str, Any]], + legend_kwargs: Optional[dict[str, Any]], ) -> Axes: if axis is None: if figsize is None: @@ -123,7 +123,7 @@ def _plot_bar( def _initialize_figsize( partition_id_axis: str, num_partitions: int, -) -> Tuple[float, float]: +) -> tuple[float, float]: figsize = (0.0, 0.0) if partition_id_axis == "x": figsize = (6.4, 4.8) @@ -132,7 +132,7 @@ def _initialize_figsize( return figsize -def _initialize_xy_labels(size_unit: str, partition_id_axis: str) -> Tuple[str, str]: +def _initialize_xy_labels(size_unit: str, partition_id_axis: str) -> tuple[str, str]: xlabel = "Partition ID" ylabel = "Count" if size_unit == "absolute" else "Percent %" diff --git a/datasets/flwr_datasets/visualization/comparison_label_distribution.py b/datasets/flwr_datasets/visualization/comparison_label_distribution.py index 554f6d78d59a..8a15452fb86d 100644 --- 
a/datasets/flwr_datasets/visualization/comparison_label_distribution.py +++ b/datasets/flwr_datasets/visualization/comparison_label_distribution.py @@ -15,7 +15,7 @@ """Comparison of label distribution plotting.""" -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Optional, Union import matplotlib.colors as mcolors import matplotlib.pyplot as plt @@ -31,22 +31,22 @@ # pylint: disable=too-many-arguments,too-many-locals def plot_comparison_label_distribution( - partitioner_list: List[Partitioner], - label_name: Union[str, List[str]], + partitioner_list: list[Partitioner], + label_name: Union[str, list[str]], plot_type: str = "bar", size_unit: str = "percent", max_num_partitions: Optional[Union[int]] = 30, partition_id_axis: str = "y", - figsize: Optional[Tuple[float, float]] = None, + figsize: Optional[tuple[float, float]] = None, subtitle: str = "Comparison of Per Partition Label Distribution", - titles: Optional[List[str]] = None, + titles: Optional[list[str]] = None, cmap: Optional[Union[str, mcolors.Colormap]] = None, legend: bool = False, legend_title: Optional[str] = None, verbose_labels: bool = True, - plot_kwargs_list: Optional[List[Optional[Dict[str, Any]]]] = None, - legend_kwargs: Optional[Dict[str, Any]] = None, -) -> Tuple[Figure, List[Axes], List[pd.DataFrame]]: + plot_kwargs_list: Optional[list[Optional[dict[str, Any]]]] = None, + legend_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[Figure, list[Axes], list[pd.DataFrame]]: """Compare the label distribution across multiple partitioners. Parameters @@ -143,7 +143,7 @@ def plot_comparison_label_distribution( num_partitioners = len(partitioner_list) if isinstance(label_name, str): label_name = [label_name] * num_partitioners - elif isinstance(label_name, List): + elif isinstance(label_name, list): pass else: raise TypeError( @@ -215,8 +215,8 @@ def plot_comparison_label_distribution( def _initialize_comparison_figsize( - figsize: Optional[Tuple[float, float]], num_partitioners: int -) -> Tuple[float, float]: + figsize: Optional[tuple[float, float]], num_partitioners: int +) -> tuple[float, float]: if figsize is not None: return figsize x_value = 4 + (num_partitioners - 1) * 2 @@ -227,7 +227,7 @@ def _initialize_comparison_figsize( def _initialize_comparison_xy_labels( plot_type: str, partition_id_axis: str -) -> Tuple[str, str]: +) -> tuple[str, str]: if plot_type == "bar": xlabel = "Partition ID" ylabel = "Class distribution" diff --git a/datasets/flwr_datasets/visualization/heatmap_plot.py b/datasets/flwr_datasets/visualization/heatmap_plot.py index 3c87de7693ae..b5a0e640eb1b 100644 --- a/datasets/flwr_datasets/visualization/heatmap_plot.py +++ b/datasets/flwr_datasets/visualization/heatmap_plot.py @@ -15,7 +15,7 @@ """Label distribution heatmap plotting.""" -from typing import Any, Dict, Optional, Tuple, Union +from typing import Any, Optional, Union import numpy as np import pandas as pd @@ -29,15 +29,15 @@ def _plot_heatmap( dataframe: pd.DataFrame, axis: Optional[Axes], - figsize: Optional[Tuple[float, float]], + figsize: Optional[tuple[float, float]], title: str, colormap: Optional[Union[str, mcolors.Colormap]], partition_id_axis: str, size_unit: str, legend: bool, legend_title: Optional[str], - plot_kwargs: Optional[Dict[str, Any]], - legend_kwargs: Optional[Dict[str, Any]], + plot_kwargs: Optional[dict[str, Any]], + legend_kwargs: Optional[dict[str, Any]], ) -> Axes: if axis is None: if figsize is None: @@ -90,7 +90,7 @@ def _initialize_figsize( partition_id_axis: str, 
num_partitions: int, num_labels: int, -) -> Tuple[float, float]: +) -> tuple[float, float]: figsize = (0.0, 0.0) if partition_id_axis == "x": figsize = (3 * np.sqrt(num_partitions), np.sqrt(num_labels)) diff --git a/datasets/flwr_datasets/visualization/label_distribution.py b/datasets/flwr_datasets/visualization/label_distribution.py index 0c47bd204a17..b1183c225b86 100644 --- a/datasets/flwr_datasets/visualization/label_distribution.py +++ b/datasets/flwr_datasets/visualization/label_distribution.py @@ -15,7 +15,7 @@ """Label distribution plotting.""" -from typing import Any, Dict, Optional, Tuple, Union +from typing import Any, Optional, Union import matplotlib.colors as mcolors import pandas as pd @@ -40,15 +40,15 @@ def plot_label_distributions( max_num_partitions: Optional[int] = None, partition_id_axis: str = "x", axis: Optional[Axes] = None, - figsize: Optional[Tuple[float, float]] = None, + figsize: Optional[tuple[float, float]] = None, title: str = "Per Partition Label Distribution", cmap: Optional[Union[str, mcolors.Colormap]] = None, legend: bool = False, legend_title: Optional[str] = None, verbose_labels: bool = True, - plot_kwargs: Optional[Dict[str, Any]] = None, - legend_kwargs: Optional[Dict[str, Any]] = None, -) -> Tuple[Figure, Axes, pd.DataFrame]: + plot_kwargs: Optional[dict[str, Any]] = None, + legend_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[Figure, Axes, pd.DataFrame]: """Plot the label distribution of the partitions. Parameters diff --git a/datasets/pyproject.toml b/datasets/pyproject.toml index 46ecb56233d3..73523af2039e 100644 --- a/datasets/pyproject.toml +++ b/datasets/pyproject.toml @@ -31,7 +31,6 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -92,7 +91,7 @@ known_first_party = ["flwr_datasets"] [tool.black] line-length = 88 -target-version = ["py38", "py39", "py310", "py311"] +target-version = ["py39", "py310", "py311"] [tool.pylint."MESSAGES CONTROL"] disable = "duplicate-code,too-few-public-methods,useless-import-alias" @@ -130,7 +129,7 @@ wrap-summaries = 88 wrap-descriptions = 88 [tool.ruff] -target-version = "py38" +target-version = "py39" line-length = 88 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] diff --git a/dev/build-docker-image-matrix.py b/dev/build-docker-image-matrix.py index f9822574d0d5..c19949e358b9 100644 --- a/dev/build-docker-image-matrix.py +++ b/dev/build-docker-image-matrix.py @@ -22,7 +22,6 @@ class Distro: LATEST_SUPPORTED_PYTHON_VERSION = "3.11" SUPPORTED_PYTHON_VERSIONS = [ - "3.8", "3.9", "3.10", LATEST_SUPPORTED_PYTHON_VERSION, diff --git a/dev/changelog_config.toml b/dev/changelog_config.toml index 05527e2b2cb3..82a10d30173b 100644 --- a/dev/changelog_config.toml +++ b/dev/changelog_config.toml @@ -3,7 +3,7 @@ type = ["ci", "docs", "feat", "fix", "refactor", "break"] -project = ["framework", "baselines", "datasets", "examples", "benchmarks"] +project = ["framework", "baselines", "datasets", "examples", "benchmarks", "glossary"] scope = "skip" diff --git a/dev/setup-defaults.sh b/dev/setup-defaults.sh index 36cbfe4df671..af5f0cb9d3ce 100755 --- a/dev/setup-defaults.sh +++ b/dev/setup-defaults.sh @@ -1,7 +1,7 @@ #!/bin/bash set -e -version=${1:-3.8.17} +version=${1:-3.9.20} # To install pyenv and 
virtualenv plugin function install_pyenv(){ diff --git a/dev/venv-create.sh b/dev/venv-create.sh index 63e82131d2fb..112f3a4b2917 100755 --- a/dev/venv-create.sh +++ b/dev/venv-create.sh @@ -2,7 +2,7 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} # Check if the directory for the Python version does not exist and if so, # install the right Python version through pyenv diff --git a/dev/venv-delete.sh b/dev/venv-delete.sh index 3a74d2fb8a4e..50bed76b203f 100755 --- a/dev/venv-delete.sh +++ b/dev/venv-delete.sh @@ -2,6 +2,6 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} pyenv uninstall -f flower-$version diff --git a/dev/venv-reset.sh b/dev/venv-reset.sh index 69713f7df62a..5ab05f29c137 100755 --- a/dev/venv-reset.sh +++ b/dev/venv-reset.sh @@ -2,7 +2,7 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} # Delete caches, venv, and lock file ./dev/rm-caches.sh diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index efa10a69531c..681916e78ed5 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,7 +3,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" "Last-Translator: Charles Beauville \n" "Language: fr\n" @@ -13,48 +13,198 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Architecture florale" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "Moteur client Edge" +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" -msgstr "`Flower `_ architecture de base avec Edge Client Engine" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "Moteur de client virtuel" +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? 
Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +#, fuzzy +msgid "Flower public API" +msgstr "Flower ClientApp." + +#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. 
Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:100 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_ architecture de base avec moteur de client " -"virtuel" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "Moteur client virtuel et moteur client Edge dans la même charge de travail" +#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -"`Flower `_ architecture de base avec un moteur de " -"client virtuel et un moteur de client périphérique" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +msgid "How to Build Docker Flower Images Locally" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -76,29 +226,21 @@ msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:12 #, fuzzy -msgid "Clone the flower repository." +msgid "Clone the ``flower`` repository." msgstr "**Fourche le dépôt de Flower**" #: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 msgid "Verify the Docker daemon is running." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" - -#: ../../source/contributor-how-to-build-docker-images.rst:25 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " "``src/docker``." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:23 msgid "" "Flower Docker images are configured via build arguments. Through build " "arguments, we can make the creation of images more flexible. For example," @@ -109,160 +251,181 @@ msgid "" "below." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:35 +#: ../../source/contributor-how-to-build-docker-images.rst:30 #, fuzzy -msgid "Building the base image" +msgid "Building the Base Image" msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:36 #: ../../source/contributor-how-to-build-docker-images.rst:98 #, fuzzy msgid "Build argument" msgstr "Amélioration de la documentation" -#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:37 #: ../../source/contributor-how-to-build-docker-images.rst:99 #, fuzzy msgid "Description" msgstr "Dépréciations" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:38 #: ../../source/contributor-how-to-build-docker-images.rst:100 #, fuzzy msgid "Required" msgstr "Changements nécessaires" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:39 #: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/docker/persist-superlink-state.rst:18 +#: ../../source/docker/pin-version.rst:11 +#: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "Exemples de PyTorch" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:40 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:41 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 +#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "No" msgstr "Aucun" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:43 msgid "``ubuntu``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:44 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "Version of the Linux distribution." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 -#, fuzzy -msgid "``22.04``" -msgstr "1.0.0rc1" +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:48 #, fuzzy msgid "``PYTHON_VERSION``" msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:49 msgid "Version of ``python`` to be installed." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:52 msgid "``PIP_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "Version of ``pip`` to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:62 #: ../../source/contributor-how-to-build-docker-images.rst:108 #, fuzzy msgid "Yes" msgstr "Types" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#, fuzzy -msgid "``23.0.1``" -msgstr "1.0.0rc1" +#: ../../source/contributor-how-to-build-docker-images.rst:55 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:56 msgid "``SETUPTOOLS_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:57 msgid "Version of ``setuptools`` to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:64 -#, fuzzy -msgid "``69.0.2``" -msgstr "``1.0.0b0``" +#: ../../source/contributor-how-to-build-docker-images.rst:59 +msgid ":substitution-code:`|setuptools_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:60 msgid "``FLWR_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:61 msgid "Version of Flower to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:68 -#, fuzzy -msgid "``1.8.0``" -msgstr "``1.0.0b0``" +#: ../../source/contributor-how-to-build-docker-images.rst:63 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:64 msgid "``FLWR_PACKAGE``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:65 msgid "The Flower package to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "Version Python" + +#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." 
+msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +#, fuzzy +msgid "`Direct Reference Examples`_" +msgstr "Demande pour un nouveau Flower Example" + +#: ../../source/contributor-how-to-build-docker-images.rst:73 msgid "" "The following example creates a base Ubuntu/Alpine image with Python " -"3.11.0, pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:88 msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:92 #, fuzzy -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "Démarrer le serveur" +msgid "Building a Flower Binary Image" +msgstr "Chargement des données" #: ../../source/contributor-how-to-build-docker-images.rst:102 msgid "``BASE_REPOSITORY``" @@ -286,26 +449,31 @@ msgid "The Tag of the Flower base image." msgstr "Chargement des données" #: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:111 msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image " -"with the official Flower base image:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:121 msgid "" "If you want to use your own base image instead of the official Flower " "base image, all you need to do is set the ``BASE_REPOSITORY`` build " -"argument." +"argument to ``flwr_base`` (as we've specified above)." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:133 +#: ../../source/contributor-how-to-build-docker-images.rst:132 msgid "After creating the image, we can test whether the image is working:" msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:139 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "Demande pour un nouveau Flower Example" + #: ../../source/contributor-how-to-contribute-translations.rst:2 #, fuzzy msgid "Contribute translations" @@ -676,11 +844,11 @@ msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(avec les extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (avec les extras)" #: ../../source/contributor-how-to-install-development-versions.rst:42 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" @@ -697,12 +865,13 @@ msgstr "" "(sans les extras)" #: ../../source/contributor-how-to-install-development-versions.rst:45 +#, fuzzy msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git``" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git``" -"@nom-de-la-branche'`` (avec des extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"``@nom-de-la-branche'`` (avec des extras)" #: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Open Jupyter Notebooks on Google Colab" @@ -821,79 +990,29 @@ msgstr "" msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:15 -#, fuzzy -msgid "Trigger the CI for building the Docker images." -msgstr "Démarrer le serveur" - #: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a " -"``workflow_dispatch`` event in the GitHub CI. This can be done either " -"through the UI or via the GitHub CLI. The event requires only one input, " -"the Flower version, to be released." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:21 -#, fuzzy -msgid "**Via the UI**" -msgstr "**Review the PR**" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page " -"`_." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower " -"in the ``Version of Flower`` input field." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." 
-msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "Après la publication" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:19 msgid "Create a pull request which contains the following changes:" msgstr "Crée une demande de pull qui contient les modifications suivantes :" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:21 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "Update all files which contain the current version number if necessary." msgstr "" "Mets à jour tous les fichiers qui contiennent le numéro de version actuel" " si nécessaire." -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:23 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "Ajoute une nouvelle section ``Unreleased`` dans ``changelog.md``." -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:25 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." @@ -901,15 +1020,15 @@ msgstr "" "Fusionne la pull request le jour même (c'est-à-dire avant qu'une nouvelle" " version nightly ne soit publiée sur PyPI)." -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:28 msgid "Publishing a pre-release" msgstr "Publier une pré-version" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Pre-release naming" msgstr "Nom de la pré-version" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" @@ -918,39 +1037,39 @@ msgstr "" "Les préversions DOIVENT utiliser l'un des modèles de dénomination " "suivants :" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "Alpha : ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:36 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "Bêta : ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "Candidat à la publication (RC) : ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:39 msgid "Examples include:" msgstr "Voici quelques exemples :" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:42 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:43 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "``1.0.0rc1``" msgstr "1.0.0rc1" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" @@ -958,11 +1077,11 @@ msgstr "" "Ceci est conforme au PEP-440 et aux recommandations de l'Autorité de " "l'emballage Python (PyPA) :" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: ../../source/contributor-how-to-release-flower.rst:52 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -984,17 +1103,17 @@ msgstr "" "Versioning Specification `_ (en particulier le point 11 sur la préséance)." -#: ../../source/contributor-how-to-release-flower.rst:73 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "Pre-release classification" msgstr "Classification avant publication" -#: ../../source/contributor-how-to-release-flower.rst:75 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "" "La prochaine préversion doit-elle être appelée alpha, bêta ou release " "candidate ?" 
-#: ../../source/contributor-how-to-release-flower.rst:77 +#: ../../source/contributor-how-to-release-flower.rst:59 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " @@ -1005,11 +1124,11 @@ msgstr "" "version stable) - si aucun problème n'apparaît, cette version deviendra " "la prochaine version stable" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "Beta: feature complete, allowed to have known issues" msgstr "Bêta : fonctionnalité complète, autorisée à avoir des problèmes connus" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:61 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" "Alpha : les fonctionnalités ne sont pas complètes, les problèmes connus " @@ -1040,12 +1159,12 @@ msgstr "Version Python" #: ../../source/how-to-install-flower.rst:8 #, fuzzy msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." msgstr "" -"Flower nécessite `Python 3.7 `_ ou plus, " -"nous recommandons `Python 3.8 `_." +"Flower nécessite `Python 3.9 `_ ou plus, " +"nous recommandons `Python 3.10 `_." #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 msgid "" @@ -2112,10 +2231,10 @@ msgstr "" "Flower !" #: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "Prochaines étapes" @@ -2220,14 +2339,17 @@ msgid "Get started as a contributor" msgstr "Devenez un·e contributeur·ice" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "Prérequis" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 #, fuzzy -msgid "`Python 3.8 `_ or above" -msgstr "`Python 3.7 `_ ou plus" +msgid "`Python 3.9 `_ or above" +msgstr "`Python 3.10 `_ ou plus" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2260,8 +2382,9 @@ msgid "Developer Machine Setup" msgstr "Setup de la machine" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" -msgstr "" +#, fuzzy +msgid "Preliminaries" +msgstr "Principes" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 msgid "Some system-wide dependencies are needed." 
@@ -2321,22 +2444,22 @@ msgstr "" msgid "" "If you don't have :code:`pyenv` installed, the following script that will" " install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +":code:`Python 3.9.20` by default)::" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " -"virtuel (avec :code:`Python 3.8.17` par défaut)::" +"virtuel (avec :code:`Python 3.9.20` par défaut)::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 #, fuzzy msgid "" "If you already have :code:`pyenv` installed (along with the :code:`pyenv-" "virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +":code:`Python 3.9.20` by default)::" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " -"virtuel (avec :code:`Python 3.8.17` par défaut)::" +"virtuel (avec :code:`Python 3.9.20` par défaut)::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 #, fuzzy @@ -2487,15920 +2610,14624 @@ msgstr "" msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:2 #, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "Collecte centralisée des données" + +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante avec `FedBN `_, une stratégie" -" de formation fédérée conçue pour les données non-identifiées. Nous " -"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " -"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " -"Lors de l'application de FedBN, seules quelques modifications sont " -"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " -"fédération `_." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "Formation centralisée" +#: ../../source/docker/enable-tls.rst:7 +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." 
+msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 -#, fuzzy +#: ../../source/docker/enable-tls.rst:12 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -"Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " -"Centralized To Federated `_. La seule chose à faire est de modifier " -"le fichier appelé :code:`cifar.py`, la partie révisée est montrée ci-" -"dessous :" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" -"L'architecture du modèle définie dans la classe Net() est ajoutée avec " -"les couches de normalisation par lots en conséquence." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -"Tu peux maintenant exécuter ta charge de travail d'apprentissage " -"automatique :" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 -#, fuzzy +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:14 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." msgstr "" -"Jusqu'à présent, tout ceci devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un système d'apprentissage fédéré au sein de " -"FedBN, le système se compose d'un serveur et de deux clients." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "Formation fédérée" +#: ../../source/docker/enable-tls.rst:27 +#, fuzzy +msgid "SuperLink" +msgstr "flower-superlink" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:29 +msgid "" +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst #, fuzzy +msgid "Understanding the command" +msgstr "Entraîne le modèle" + +#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:213 +#: ../../source/docker/tutorial-quickstart-docker.rst:300 +msgid "``docker run``: This tells Docker to run a container from an image." +msgstr "" + +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:214 +#: ../../source/docker/tutorial-quickstart-docker.rst:301 +msgid "``--rm``: Remove the container once it is stopped or the command exits." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -"Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " -"`_, les parties suivantes sont faciles à suivre, seules " -"les fonctions :code:`get_parameters` et :code:`set_parameters` dans " -":code:`client.py` ont besoin d'être révisées. Si ce n'est pas le cas, " -"veuillez lire `Exemple : PyTorch - From Centralized To Federated " -"`. d'abord." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Dans FedBN, " -":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " -"directement." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." 
+"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -"Enfin, nous allons réviser notre logique *client* en modifiant " -":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " -"nous allons exclure les paramètres de normalisation des lots de la liste " -"des paramètres du modèle lors de l'envoi ou de la réception depuis le " -"serveur." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" +#: ../../source/docker/enable-tls.rst +msgid "directory." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras ton projet PyTorch " -"(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" -" FedBN sur deux clients. Félicitations !" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "Prochaines étapes" +#: ../../source/docker/enable-tls.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 -#, fuzzy +#: ../../source/docker/enable-tls.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"Le code source complet de cet exemple se trouve ici " -"`_. Notre exemple est bien sûr un peu trop " -"simplifié parce que les deux clients chargent exactement le même ensemble" -" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " -"approfondir ce sujet. Pourquoi ne pas utiliser différents sous-ensembles " -"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " -"?" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "Exemple : JAX - Exécuter JAX Federated" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." 
+msgstr "Utiliser les conteneurs VS Code Remote" -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" +msgstr "" + +#: ../../source/docker/enable-tls.rst #, fuzzy +msgid "SuperLink." +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail JAX existante. Nous utilisons JAX" -" pour entraîner un modèle de régression linéaire sur un ensemble de " -"données scikit-learn. Nous structurerons l'exemple de la même manière que" -" notre présentation `PyTorch - De la centralisation à la fédération " -"`_. Tout d'abord, nous construisons une approche" -" d'entraînement centralisée basée sur le tutoriel `Régression linéaire " -"avec JAX " -"`_." -" Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -"Avant de commencer à construire notre exemple JAX, nous devons installer " -"les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et " -":code:`flwr` :" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "Régression linéaire avec JAX" +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" msgstr "" -"Nous commençons par une brève description du code d'entraînement " -"centralisé basé sur un modèle :code:`Régression linéaire`. Si tu veux une" -" explication plus approfondie de ce qui se passe, jette un coup d'œil à " -"la documentation officielle `JAX `_." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -"Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les " -"composants nécessaires pour un apprentissage traditionnel (centralisé) de" -" la régression linéaire. Tout d'abord, les paquets JAX :code:`jax` et " -":code:`jaxlib` doivent être importés. En outre, nous devons importer " -":code:`sklearn` puisque nous utilisons :code:`make_regression` pour le " -"jeu de données et :code:`train_test_split` pour diviser le jeu de données" -" en un jeu d'entraînement et un jeu de test. Tu peux voir que nous " -"n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " -"fédéré, ce qui sera fait plus tard." -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 +#: ../../source/docker/enable-tls.rst +msgid "the network." +msgstr "" + +#: ../../source/docker/enable-tls.rst:71 +#, fuzzy +msgid "SuperNode" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:73 msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test mentionnés." -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst:78 msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." msgstr "" -"L'architecture du modèle (un modèle :code:`Régression linéaire` très " -"simple) est définie dans :code:`load_model()`." -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." 
+"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -"Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," -" qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " -":code:`loss_fn()`) pour chaque lot d'exemples d'entraînement. La fonction" -" de perte est séparée puisque JAX prend des dérivés avec une fonction " -":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " -":code:`train()`)." -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "Utiliser les conteneurs VS Code Remote" + +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." -" La fonction prend tous les exemples de test et mesure la perte du modèle" -" de régression linéaire." -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " -"notre modèle à l'aide de JAX. Comme nous l'avons déjà mentionné, la " -"fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " -"à :code:`train()`." -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." msgstr "" -"Tu peux maintenant exécuter ta charge de travail (centralisée) de " -"régression linéaire JAX :" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" +msgstr "" + +#: ../../source/docker/enable-tls.rst:107 msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé JAX. 
Passons à l'étape suivante et utilisons ce que nous " -"avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX rencontre Flower" +#: ../../source/docker/enable-tls.rst +msgid "" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -"Le concept de fédération d'une charge de travail existante est toujours " -"le même et facile à comprendre. Nous devons démarrer un *serveur*, puis " -"utiliser le code dans :code:`jax_training.py` pour les *clients* qui sont" -" connectés au *serveur*.Le *serveur* envoie les paramètres du modèle aux " -"clients.Les *clients* exécutent la formation et mettent à jour les " -"paramètres.Les paramètres mis à jour sont renvoyés au *serveur*, qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues.Ceci décrit " -"un tour du processus d'apprentissage fédéré, et nous répétons cette " -"opération pour plusieurs tours." -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " -"configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" -" :code:`flwr`. Ensuite, nous utilisons la fonction :code:`start_server` " -"pour démarrer un serveur et lui demander d'effectuer trois cycles " -"d'apprentissage fédéré." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "Nous pouvons déjà démarrer le *serveur* :" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -"Enfin, nous allons définir la logique de notre *client* dans " -":code:`client.py` et nous appuyer sur la formation JAX définie " -"précédemment dans :code:`jax_training.py`. Notre *client* doit importer " -":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" -" les paramètres de notre modèle JAX :" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -"L'implémentation d'un *client* Flower signifie essentiellement " -"l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " -":code:`flwr.client.NumPyClient`. Notre implémentation sera basée sur " -":code:`flwr.client.NumPyClient` et nous l'appellerons " -":code:`FlowerClient`. :code:`NumPyClient` est légèrement plus facile à " -"implémenter que :code:`Client` si vous utilisez un framework avec une " -"bonne interopérabilité NumPy (comme JAX) parce qu'il évite une partie du " -"boilerplate qui serait autrement nécessaire. :code:`FlowerClient` doit " -"implémenter quatre méthodes, deux méthodes pour obtenir/régler les " -"paramètres du modèle, une méthode pour former le modèle, et une méthode " -"pour tester le modèle :" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (optional)`" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" +msgstr "" + +#: ../../source/docker/index.rst:4 msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "règle les paramètres du modèle local reçus du serveur" +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" +#: ../../source/docker/index.rst:7 +msgid "" +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "Pour commencer" + +#: ../../source/docker/index.rst:20 +msgid "Running in Production" +msgstr "" + +#: ../../source/docker/index.rst:29 +#, fuzzy +msgid "Advanced Options" +msgstr "Options d'installation avancées" + +#: ../../source/docker/index.rst:41 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "Serveur de Flower" + +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" +msgstr "" + +#: ../../source/docker/persist-superlink-state.rst:4 msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -"boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " -":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +#: ../../source/docker/persist-superlink-state.rst:7 +msgid "" +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/persist-superlink-state.rst:10 msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. 
It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -"récupère les paramètres du modèle et les renvoie sous forme de liste de " -":code:`ndarray` NumPy (ce qui correspond à ce que " -":code:`flwr.client.NumPyClient` attend)" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" +#: ../../source/docker/persist-superlink-state.rst:20 +msgid "" +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/persist-superlink-state.rst:35 msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" -"mettre à jour les paramètres du modèle local avec les paramètres reçus du" -" serveur" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" -msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" +#: ../../source/docker/pin-version.rst:2 +msgid "Pin a Docker Image to a Specific Version" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/docker/pin-version.rst:4 +msgid "" +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." 
msgstr "" -"récupère les paramètres du modèle local mis à jour et les renvoie au " -"serveur" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`évaluer`" +#: ../../source/docker/pin-version.rst:13 +msgid "" +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#: ../../source/docker/pin-version.rst:22 +msgid "This will output" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "renvoie la perte locale au serveur" +#: ../../source/docker/pin-version.rst:29 +msgid "Next, we can pin the digest when running a new SuperLink container:" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:4 msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -"La partie la plus difficile consiste à transformer les paramètres du " -"modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " -"rendre compatibles avec `NumPyClient`." -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " -"précédemment dans :code:`jax_training.py`. Ce que nous faisons vraiment " -"ici, c'est que nous indiquons à Flower, par le biais de notre sous-classe" -" :code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. 
Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" +#: ../../source/docker/run-as-root-user.rst:14 +msgid "" +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" msgstr "" -"Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " -"les commandes suivantes" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras que ton projet JAX exécute " -"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/run-as-root-user.rst:29 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "Démarrer le serveur" + +#: ../../source/docker/run-as-subprocess.rst:2 #, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "Vérifier le format et tester le code" + +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -"Le code source de cet exemple a été amélioré au fil du temps et peut être" -" trouvé ici : `Quickstart JAX " -"`_. " -"Notre exemple est quelque peu simplifié à l'extrême car les deux clients " -"chargent le même jeu de données." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -"Tu es maintenant prêt à approfondir ce sujet. Pourquoi ne pas utiliser un" -" modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " -"ne pas ajouter d'autres clients ?" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "Exemple : PyTorch - De la centralisation à la fédération" +#: ../../source/docker/run-as-subprocess.rst:16 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Serveur de Flower" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/run-as-subprocess.rst:30 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante. Nous utilisons PyTorch pour entraîner un réseau neuronal " -"convolutif sur l'ensemble de données CIFAR-10. Tout d'abord, nous " -"présentons cette tâche d'apprentissage automatique avec une approche " -"d'entraînement centralisée basée sur le tutoriel `Deep Learning with " -"PyTorch " -"`_. " -"Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" msgstr "" -"Nous commençons par une brève description du code d'entraînement CNN " -"centralisé. Si tu veux une explication plus approfondie de ce qui se " -"passe, jette un coup d'œil au tutoriel officiel `PyTorch " -"`_." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." 
+"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -"Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " -"composants requis pour une formation traditionnelle (centralisée) sur le " -"CIFAR-10. Tout d'abord, tous les paquets requis (tels que :code:`torch` " -"et :code:`torchvision`) doivent être importés. Tu peux voir que nous " -"n'importons aucun paquet pour l'apprentissage fédéré. Tu peux conserver " -"toutes ces importations telles quelles même lorsque nous ajouterons les " -"composants d'apprentissage fédéré à un moment ultérieur." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -"Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" -" CIFAR-10 pour cette charge de travail d'apprentissage automatique. " -"L'architecture du modèle (un réseau neuronal convolutif très simple) est " -"définie dans :code:`class Net()`." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test CIFAR-10. La fonction :code:`transform` normalise les données après" -" leur chargement." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 -msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" msgstr "" -"Nous devons maintenant définir la formation (fonction :code:`train()`) " -"qui passe en boucle sur l'ensemble de la formation, mesure la perte, la " -"rétropropage, puis effectue une étape d'optimisation pour chaque lot " -"d'exemples de formation." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 -msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`test()`. La " -"fonction boucle sur tous les échantillons de test et mesure la perte du " -"modèle en fonction de l'ensemble des données de test." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +msgid "The Docker daemon is running." msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"la formation et l'évaluation, nous pouvons tout mettre ensemble et former" -" notre CNN sur CIFAR-10." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "Demande pour un nouveau Flower Example" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" msgstr "" -"Le projet simple d'apprentissage automatique discuté dans la section " -"précédente entraîne le modèle sur un seul ensemble de données (CIFAR-10)," -" nous appelons cela l'apprentissage centralisé. Ce concept " -"d'apprentissage centralisé, comme le montre la section précédente, est " -"probablement connu de la plupart d'entre vous, et beaucoup d'entre vous " -"l'ont déjà utilisé. Normalement, si tu veux exécuter des charges de " -"travail d'apprentissage automatique de manière fédérée, tu dois alors " -"changer la plupart de ton code et tout mettre en place à partir de zéro, " -"ce qui peut représenter un effort considérable." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -"Cependant, avec Flower, tu peux faire évoluer ton code préexistant vers " -"une configuration d'apprentissage fédéré sans avoir besoin d'une " -"réécriture majeure." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "Active la virtualenv en exécutant la commande suivante :" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#, fuzzy msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 +msgid "pyproject.toml" msgstr "" -"Le concept est facile à comprendre. Nous devons démarrer un *serveur* et " -"utiliser le code dans :code:`cifar.py` pour les *clients* qui sont " -"connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " -"clients. Les *clients* exécutent la formation et mettent à jour les " -"paramètres. Les paramètres mis à jour sont renvoyés au *serveur* qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " -"un tour du processus d'apprentissage fédéré et nous répétons cette " -"opération pour plusieurs tours." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." msgstr "" -"Enfin, nous allons définir notre logique *client* dans :code:`client.py` " -"et nous appuyer sur la formation centralisée définie précédemment dans " -":code:`cifar.py`. Notre *client* doit importer :code:`flwr`, mais aussi " -":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. 
" -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#, fuzzy +msgid "Run the example:" +msgstr "Fédérer l'exemple" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "renvoie la perte locale et la précision au serveur" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`test()` définies " -"précédemment dans :code:`cifar.py`. Ce que nous faisons vraiment ici, " -"c'est que nous indiquons à Flower, par le biais de notre sous-classe " -":code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. 
Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 -#, fuzzy -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" msgstr "" -"Il ne reste plus qu'à définir une fonction qui charge le modèle et les " -"données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" -" données et ton modèle en utilisant :code:`cifar.py`. Démarre " -":code:`CifarClient` avec la fonction :code:`fl.client.start_client()` en " -"la faisant pointer sur la même adresse IP que celle que nous avons " -"utilisée dans :code:`server.py` :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -"dans chaque fenêtre (assure-toi que le serveur fonctionne avant de le " -"faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " -"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 -#, fuzzy -msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." msgstr "" -"Le code source complet de cet exemple : `PyTorch : From Centralized To " -"Federated (Code) `_. Notre exemple est, bien sûr, " -"un peu trop simplifié parce que les deux clients chargent exactement le " -"même ensemble de données, ce qui n'est pas réaliste. Tu es maintenant " -"prêt à explorer davantage ce sujet. Pourquoi ne pas utiliser différents " -"sous-ensembles de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter " -"d'autres clients ?" 
-#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 #, fuzzy -msgid "Differential Privacy" -msgstr "Confidentialité différentielle" +msgid "Limitations" +msgstr "Simulation de moniteur" -#: ../../source/explanation-differential-privacy.rst:3 -msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#, fuzzy +msgid "Quickstart Example" +msgstr "Démarrage rapide de JAX" -#: ../../source/explanation-differential-privacy.rst:6 -msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#, fuzzy +msgid "quickstart-fastai" +msgstr "Démarrage rapide fastai" -#: ../../source/explanation-differential-privacy.rst:12 -msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." 
-msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "Aucun" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" -msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "Quickstart tutorials" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-jax" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#, fuzzy msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." -#: ../../source/explanation-differential-privacy.rst:25 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy -msgid "Formal Definition" -msgstr "Compiler les définitions ProtoBuf" +msgid "quickstart-mlcube" +msgstr "Démarrage rapide de JAX" -#: ../../source/explanation-differential-privacy.rst:26 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlx" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-monai" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +#, fuzzy +msgid "quickstart-pytorch-lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "Démarrage rapide de PyTorch" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#, fuzzy +msgid "quickstart-sklearn-tabular" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 +#: ../../source/docker/set-environment-variables.rst:2 #, fuzzy -msgid "Differential Privacy in Machine Learning" -msgstr "Confidentialité différentielle" +msgid "Set Environment Variables" +msgstr "Mise en place de l'environnement de codage" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/docker/set-environment-variables.rst:4 msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:53 +#: ../../source/docker/tutorial-quickstart-docker.rst:2 #, fuzzy -msgid "Differential Privacy in Federated Learning" -msgstr "Mise à l'échelle de l'apprentissage fédéré" +msgid "Quickstart with Docker" +msgstr "Démarrage rapide XGBoost" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/tutorial-quickstart-docker.rst:7 msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/explanation-differential-privacy.rst:60 -msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 -msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:45 msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 +#: ../../source/docker/tutorial-quickstart-docker.rst:50 #, fuzzy -msgid "Central Differential Privacy" -msgstr "Confidentialité différentielle" +msgid "Step 2: Start the SuperLink" +msgstr "Démarrer le serveur" -#: ../../source/explanation-differential-privacy.rst:69 -msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#, fuzzy +msgid "Open your terminal and run:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:215 +#: ../../source/docker/tutorial-quickstart-docker.rst:304 msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:216 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +"``--detach``: Run the container in the background, freeing up the " +"terminal." msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 -msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 +#: ../../source/docker/tutorial-quickstart-docker.rst:80 #, fuzzy -msgid "Local Differential Privacy" -msgstr "Confidentialité différentielle" +msgid "Step 3: Start the SuperNode" +msgstr "Démarrer le serveur" -#: ../../source/explanation-differential-privacy.rst:107 +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." 
+"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:124 #, fuzzy -msgid "**References:**" -msgstr "Référence" +msgid "Start the second container:" +msgstr "Démarrer le serveur" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst:144 msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." msgstr "" -"McMahan, H. Brendan, et al. 
\"Learning differentially private recurrent " -"language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/tutorial-quickstart-docker.rst:148 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 +#: ../../source/docker/tutorial-quickstart-docker.rst:150 #, fuzzy -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." -msgstr "" -"Andrew, Galen, et al. \"Differentially private learning with adaptive " -"clipping\" Advances in Neural Information Processing Systems 34 (2021) : " -"17455-17466." +msgid "Dockerfile.clientapp" +msgstr "Flower ClientApp." -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "Évaluation fédérée" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the Dockerfile" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"Il existe deux approches principales pour évaluer les modèles dans les " -"systèmes d'apprentissage fédérés : l'évaluation centralisée (ou côté " -"serveur) et l'évaluation fédérée (ou côté client)." -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "Évaluation centralisée" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "Stratégies intégrées" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -"Toutes les stratégies intégrées prennent en charge l'évaluation " -"centralisée en fournissant une fonction d'évaluation lors de " -"l'initialisation. 
Une fonction d'évaluation est une fonction qui peut " -"prendre les paramètres du modèle global actuel comme entrée et renvoyer " -"les résultats de l'évaluation :" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "Stratégies personnalisées" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"from the current working directory into the container's ``/app`` " +"directory." msgstr "" -"L'abstraction :code:`Strategy` fournit une méthode appelée " -":code:`evaluate` qui peut être directement utilisée pour évaluer les " -"paramètres du modèle global actuel. L'implémentation actuelle du serveur " -"appelle :code:`evaluate` après l'agrégation des paramètres et avant " -"l'évaluation fédérée (voir le paragraphe suivant)." -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "Évaluation fédérée" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "Mise en œuvre de l'évaluation fédérée" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "from the ``pyproject.toml``." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" msgstr "" -"L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " -"et peut être configurée côté serveur." -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "Configuration de l'évaluation fédérée" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -"L'évaluation fédérée peut être configurée du côté du serveur. Les " -"stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. 
If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." msgstr "" -":code:`fraction_evaluate` : un :code:`float` définissant la fraction de " -"clients qui sera sélectionnée pour l'évaluation. Si " -":code:`fraction_evaluate` est défini à :code:`0.1` et que :code:`100` " -"clients sont connectés au serveur, alors :code:`10` sera sélectionné " -"aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " -"à :code:`0.0`, l'évaluation fédérée sera désactivée." -#: ../../source/explanation-federated-evaluation.rst:106 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -"si :code:`fraction_evaluate` est réglé sur :code:`0.1`, " -":code:`min_evaluate_clients` est réglé sur 20, et que :code:`100` clients" -" sont connectés au serveur, alors :code:`20` clients seront sélectionnés " -"pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:107 -msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." msgstr "" -":code:`min_available_clients` : un :code:`int` qui définit le nombre " -"minimum de clients qui doivent être connectés au serveur avant qu'un " -"cycle d'évaluation fédérée puisse commencer. Si moins de " -":code:`min_available_clients` sont connectés au serveur, le serveur " -"attendra que d'autres clients soient connectés avant de continuer à " -"échantillonner des clients pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:184 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." msgstr "" -":code:`on_evaluate_config_fn` : une fonction qui renvoie un dictionnaire " -"de configuration qui sera envoyé aux clients sélectionnés. Cette fonction" -" sera appelée à chaque tour et offre un moyen pratique de personnaliser " -"l'évaluation côté client depuis le côté serveur, par exemple pour " -"configurer le nombre d'étapes de validation effectuées." 
- -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "Évaluer les mises à jour du modèle local pendant la formation" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/tutorial-quickstart-docker.rst:189 msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -"Les paramètres du modèle peuvent également être évalués pendant la " -"formation. :code:`Client.fit` peut renvoyer des résultats d'évaluation " -"arbitraires sous forme de dictionnaire :" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "Exemple de code complet" +#: ../../source/docker/tutorial-quickstart-docker.rst:198 +msgid "" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 #, fuzzy +msgid "Start the first ClientApp container:" +msgstr "Utilisation du moteur du client virtuel" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -"Pour un exemple de code complet qui utilise à la fois l'évaluation " -"centralisée et fédérée, voir l'*Exemple TensorFlow avancé* (la même " -"approche peut être appliquée aux charges de travail mises en œuvre dans " -"n'importe quel autre framework) : " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "Modèle FED" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "Table des matières" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[Table des matières](#table-of-contents)" +#: ../../source/docker/tutorial-quickstart-docker.rst:222 +msgid "Start the second ClientApp container:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[Résumé](#résumé)" +#: ../../source/docker/tutorial-quickstart-docker.rst:233 +#, fuzzy +msgid "Step 5: Start the SuperExec" +msgstr "Démarrer le serveur" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[Motivation](#motivation)" +#: ../../source/docker/tutorial-quickstart-docker.rst:235 +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[Buts](#buts)" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +msgid "" +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[Non-objectifs](#non-objectifs)" +#: ../../source/docker/tutorial-quickstart-docker.rst:240 +msgid "" +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[Proposition](#proposition)" +#: ../../source/docker/tutorial-quickstart-docker.rst:242 +msgid "Dockerfile.superexec" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[Inconvénients](#inconvénients)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[Alternatives envisagées](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[Annexe](#appendix)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "Résumé" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -#, fuzzy -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "[TODO - phrase 1 : résumé du problème]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 +#: ../../source/docker/tutorial-quickstart-docker.rst:277 +msgid "" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:285 #, fuzzy -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "[TODO - phrase 2 : résumé de la solution]" +msgid "Start the SuperExec container:" +msgstr "Démarrer le serveur" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "Motivation" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -#, fuzzy -msgid "\\[TODO\\]" -msgstr "[TODO]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "Objectifs" +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "Non-objectifs" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "Proposition" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "Inconvénients" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "Alternatives envisagées" +#: ../../source/docker/tutorial-quickstart-docker.rst:315 +msgid "Step 6: Run the Quickstart Project" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:317 #, fuzzy -msgid "\\[Alternative 1\\]" -msgstr "[Alternative 1]" +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/fed/0000-20200102-fed-template.md:56 -#, fuzzy -msgid "\\[Alternative 2\\]" -msgstr "[Alternative 2]" +#: ../../source/docker/tutorial-quickstart-docker.rst:326 +msgid "Run the ``quickstart-docker`` project by executing the command:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Doc sur l'amélioration des fleurs" +#: ../../source/docker/tutorial-quickstart-docker.rst:332 +msgid "Follow the SuperExec logs to track the execution of the run:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst:339 +#, fuzzy +msgid "Step 7: Update the Application" +msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[Métadonnées](#métadonnées)" +#: ../../source/docker/tutorial-quickstart-docker.rst:341 +msgid "" +"Change the application code. 
For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[Workflow](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#, fuzzy +msgid "quickstart_docker/task.py" +msgstr "Démarrage rapide des Pandas" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst:351 +msgid "Stop the current ClientApp containers:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "Chargement des données" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" +#: ../../source/docker/tutorial-quickstart-docker.rst:363 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -"Une amélioration de la fleur est un processus de développement " -"standardisé pour" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/docker/tutorial-quickstart-docker.rst:378 +msgid "Run the updated project:" msgstr "" -"fournir une structure commune pour proposer des changements plus " -"importants" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "s'assurer que la motivation du changement est claire" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/docker/tutorial-quickstart-docker.rst:385 +msgid "Step 8: Clean Up" msgstr "" -"conserver les informations sur le projet dans un système de contrôle des " -"versions" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/docker/tutorial-quickstart-docker.rst:387 +msgid "Remove the containers and the bridge network:" msgstr "" -"documenter la motivation des changements qui ont un impact sur " -"l'utilisateur" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:399 +#, fuzzy +msgid "Where to Go Next" +msgstr "Par où commencer" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 -msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" +#: ../../source/docker/tutorial-quickstart-docker.rst:401 +msgid ":doc:`enable-tls`" msgstr "" -"s'assurer que les participants de la communauté peuvent mener à bien les " -"changements dans le cadre d'une ou plusieurs versions et que les parties " -"prenantes sont représentées de manière adéquate tout au long du processus" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "Par 
conséquent, un document d'amélioration combine des aspects de" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "une caractéristique, et un document de suivi des efforts" +#: ../../source/docker/tutorial-quickstart-docker.rst:402 +msgid ":doc:`persist-superlink-state`" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "un document sur les exigences du produit" +#: ../../source/docker/tutorial-quickstart-docker.rst:403 +msgid ":doc:`tutorial-quickstart-docker-compose`" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "un document de conception" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +#, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "Démarrage rapide XGBoost" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -"en un seul fichier, qui est créé progressivement en collaboration avec la" -" communauté." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." msgstr "" -"Pour les changements lointains ou les fonctionnalités proposées à Flower," -" une abstraction au-delà d'une simple question GitHub ou d'une demande de" -" tirage est nécessaire pour comprendre et communiquer les changements à " -"venir dans le projet." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +msgid "Clone the Docker Compose ``complete`` directory:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -"L'objectif de ce processus est de réduire la quantité de \"connaissances " -"tribales\" dans notre communauté. En déplaçant les décisions des fils de " -"discussion Slack, des appels vidéo et des conversations de couloir vers " -"un artefact bien suivi, ce processus vise à améliorer la communication et" -" la découvrabilité." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. 
If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -"Si une amélioration doit être décrite par écrit ou verbalement à " -"quelqu'un d'autre que l'auteur ou le développeur, il faut envisager de " -"créer un document d'amélioration." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Serveur de Flower" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -"De même, tout effort technique (refactorisation, changement architectural" -" majeur) qui aura un impact sur une grande partie de la communauté de " -"développement doit également être communiqué à grande échelle. Le " -"processus d'amélioration est adapté à cela, même s'il n'aura aucun impact" -" sur l'utilisateur ou l'opérateur type." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -"Pour les petits changements et ajouts, passer par le processus " -"d'amélioration prendrait beaucoup de temps et serait inutile. Cela " -"inclut, par exemple, l'ajout de nouveaux algorithmes d'apprentissage " -"fédéré, car ceux-ci ne font qu'ajouter des fonctionnalités sans changer " -"le fonctionnement ou l'utilisation de Flower." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -"Les améliorations sont différentes des demandes de fonctionnalités, car " -"elles fournissent déjà un chemin tracé pour la mise en œuvre et sont " -"défendues par les membres de la communauté." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +msgid "``docker compose``: The Docker command to run the Docker Compose tool." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -"Une amélioration est capturée dans un fichier Markdown qui suit un modèle" -" défini et un flux de travail pour examiner et stocker les documents " -"d'amélioration pour référence - le Doc d'amélioration." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "Modèle de document d'amélioration" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +msgid "" +"``--build``: Rebuild the images for each service if they don't already " +"exist." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -"Chaque document d'amélioration est fourni sous la forme d'un fichier " -"Markdown ayant la structure suivante" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +msgid "Step 3: Run the Quickstart Project" msgstr "" -"Métadonnées (comme [décrit ci-dessous](#metadata) sous la forme d'un " -"préambule YAML)" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "Titre (le même que dans les métadonnées)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "Table des matières (si nécessaire)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 +msgid "" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "Notes/Contraintes/Cavats (facultatif)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "Détails de la conception (facultatif)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +msgid "quickstart-compose/pyproject.toml" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "Critères d'obtention du diplôme" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +msgid "Execute the command to run the quickstart example:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "À titre de référence, ce document suit la structure ci-dessus." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +#, fuzzy +msgid "Step 4: Update the Application" +msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "Métadonnées" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "In the next step, change the application code." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -"**numérofed** (Obligatoire) Le `numérofed` du dernier document " -"d'amélioration de la fleur + 1. Avec ce numéro, il devient facile de " -"faire référence à d'autres propositions." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +msgid "quickstart-compose/quickstart_compose/task.py" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +#, fuzzy +msgid "Rebuild and restart the services." +msgstr "Nous pouvons déjà démarrer le *serveur* :" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." 
msgstr "" -"**status** (obligatoire) L'état actuel de la proposition. Voir " -"[workflow](#workflow) pour les états possibles." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 -msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -"**authors** (Obligatoire) Une liste des auteurs de la proposition, il " -"s'agit simplement de l'identifiant GitHub." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -"**creation-date** (Obligatoire) Date à laquelle la proposition a été " -"soumise pour la première fois dans un RP." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +msgid "Run the updated quickstart example:" msgstr "" -"**dernière mise à jour** (Facultatif) La date à laquelle la proposition a" -" été modifiée de manière significative pour la dernière fois." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 -msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -"**see-also** (Facultatif) Une liste d'autres propositions qui sont " -"pertinentes par rapport à celle-ci." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**replaces** (Facultatif) Une liste de propositions que celle-ci remplace." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -"**superseded-by** (Facultatif) Une liste de propositions que celle-ci " -"remplace." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "Flux de travail" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -"L'idée à l'origine de l'amélioration doit déjà avoir fait l'objet d'une " -"discussion ou d'une présentation au sein de la communauté. À ce titre, " -"elle a besoin d'un champion, généralement l'auteur, qui se charge de " -"l'amélioration. Cette personne doit également trouver des committers to " -"Flower prêts à examiner la proposition." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -"Les nouvelles améliorations sont enregistrées avec un nom de fichier de " -"la forme `NNN-YYYMMDD-enhancement-title.md`, `NNNN` étant le numéro du " -"document d'amélioration de la fleur, dans `enhancements`. Toutes les " -"améliorations commencent à l'état `provisionnel` dans le cadre d'une " -"demande d'extraction. Les discussions sont effectuées dans le cadre de " -"l'examen de la demande d'extraction." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -"Une fois qu'une amélioration a été examinée et approuvée, son statut " -"passe à `implémentable`. L'implémentation réelle est alors réalisée dans " -"des demandes d'extension séparées. Ces demandes d'extension doivent " -"mentionner l'amélioration concernée dans leur description. Une fois " -"l'implémentation réalisée, le statut de la proposition passe à " -"`implémented`." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +msgid "Run the command:" msgstr "" -"Sous certaines conditions, d'autres états sont possibles. Une " -"amélioration a les états suivants :" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -"`provisoire` : L'amélioration a été proposée et est en cours de " -"définition. C'est l'état de départ pendant que la proposition est étoffée" -" et activement définie et discutée." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`implementable` : L'amélioration a été examinée et approuvée." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." +"Docker merges Compose files according to `merging rules " +"`_." msgstr "" -"`implemented` : L'amélioration a été mise en œuvre et n'est plus " -"activement modifiée." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -"`deferred` : L'amélioration est proposée mais n'est pas activement " -"travaillée." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +msgid "Check the content of the ``state`` directory:" msgstr "" -"`rejeté` : Les auteurs et les réviseurs ont décidé que cette amélioration" -" n'allait pas de l'avant." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "`replaced` : L'amélioration a été remplacée par une nouvelle amélioration." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -"L'ajout d'un processus supplémentaire à ceux déjà fournis par GitHub " -"(Issues et Pull Requests) ajoute plus de complexité et peut constituer un" -" obstacle pour les éventuels nouveaux contributeurs." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +msgid "Step 6: Run Flower with TLS" msgstr "" -"Élargir le modèle de proposition au-delà de la description d'une seule " -"phrase actuellement requise dans le modèle de questions sur les " -"caractéristiques peut constituer une lourde charge pour les personnes " -"dont l'anglais n'est pas la langue maternelle." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "Questions sur GitHub" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. 
One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." msgstr "" -"Il est possible d'utiliser GitHub Issues pour ce type d'améliorations. On" -" pourrait utiliser, par exemple, des balises pour les différencier et les" -" filtrer par rapport aux autres problèmes. Le principal problème concerne" -" la discussion et la révision d'une amélioration : les GitHub Issues " -"n'ont qu'un seul fil de discussion pour les commentaires. Les " -"améliorations ont généralement plusieurs fils de discussion en même temps" -" pour différentes parties de la documentation. La gestion de ces " -"multiples discussions peut être déroutante lorsque l'on utilise GitHub " -"Issues." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "Google Docs" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 +msgid "These certificates should be used only for development purposes." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." msgstr "" -"Les Google Docs permettent de multiplier les fils de discussion. Mais " -"comme les Google Docs sont hébergés en dehors du projet, il faut veiller " -"à ce que la communauté puisse les découvrir. Une liste de liens vers " -"toutes les propositions doit être gérée et mise à la disposition de la " -"communauté. Par rapport à l'envoi de propositions dans le cadre du " -"référentiel de Flower, le risque de liens manquants est beaucoup plus " -"élevé." -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Doc pour l'amélioration des fleurs" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +msgid "Restart the services with TLS enabled:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -#, fuzzy -msgid "Aggregate evaluation results" -msgstr "Résultats globaux de l'évaluation." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +msgid "Step 7: Add another SuperNode" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." 
msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "Agréger les résultats de l'évaluation personnalisée" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 +msgid "" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +msgid "In ``compose.yml``, add the following:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +msgid "compose.yml" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -"La même approche de personnalisation :code:`Stratégie` peut être utilisée" -" pour agréger les résultats d'évaluation personnalisés provenant de " -"clients individuels. Les clients peuvent renvoyer des mesures " -"personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -"Le serveur peut alors utiliser une stratégie personnalisée pour agréger " -"les mesures fournies dans ces dictionnaires :" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 -msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. 
" -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +msgid "with-tls.yml" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +msgid "with-state.yml" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 #, fuzzy -msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +msgid "Restart the services:" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." -#: ../../source/how-to-authenticate-supernodes.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +msgid "Step 10: Clean Up" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 -msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. 
Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +msgid "Remove all services and volumes:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/use-a-different-version.rst:2 +msgid "Use a Different Flower Version" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/docker/use-a-different-version.rst:9 msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:44 -msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" -#: ../../source/how-to-authenticate-supernodes.rst:47 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#, fuzzy msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." 
msgstr "" +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail d'apprentissage automatique " +"existante avec `FedBN `_, une stratégie" +" de formation fédérée conçue pour les données non-identifiées. Nous " +"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " +"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " +"Lors de l'application de FedBN, seules quelques modifications sont " +"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " +"fédération `_." -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" -msgstr "" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +msgid "Centralized Training" +msgstr "Formation centralisée" -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#, fuzzy msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" +"Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " +"Centralized To Federated `_. La seule chose à faire est de modifier " +"le fichier appelé :code:`cifar.py`, la partie révisée est montrée ci-" +"dessous :" -#: ../../source/how-to-authenticate-supernodes.rst:66 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." msgstr "" +"L'architecture du modèle définie dans la classe Net() est ajoutée avec " +"les couches de normalisation par lots en conséquence." -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "Security notice" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +msgid "You can now run your machine learning workload:" msgstr "" +"Tu peux maintenant exécuter ta charge de travail d'apprentissage " +"automatique :" -#: ../../source/how-to-authenticate-supernodes.rst:72 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#, fuzzy msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." +"So far this should all look fairly familiar if you've used PyTorch " +"before. 
Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" +"Jusqu'à présent, tout ceci devrait te sembler assez familier si tu as " +"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" +" avons construit pour créer un système d'apprentissage fédéré au sein de " +"FedBN, le système se compose d'un serveur et de deux clients." -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "Conclusion" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +msgid "Federated Training" +msgstr "Formation fédérée" -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#, fuzzy msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" +"Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " +"`_, les parties suivantes sont faciles à suivre, seules " +"les fonctions :code:`get_parameters` et :code:`set_parameters` dans " +":code:`client.py` ont besoin d'être révisées. Si ce n'est pas le cas, " +"veuillez lire `Exemple : PyTorch - From Centralized To Federated " +"`. d'abord." -#: ../../source/how-to-configure-clients.rst:2 -#, fuzzy -msgid "Configure clients" -msgstr "Configurer les clients" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +msgid "" +"Our example consists of one *server* and two *clients*. In FedBN, " +":code:`server.py` keeps unchanged, we can start the server directly." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients*. Dans FedBN, " +":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " +"directement." -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"Finally, we will revise our *client* logic by changing " +":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " +"we will exclude batch normalization parameters from model parameter list " +"when sending to or receiving from the server." msgstr "" -"En plus des paramètres du modèle, Flower peut envoyer des valeurs de " -"configuration aux clients. Les valeurs de configuration peuvent être " -"utilisées à diverses fins. 
Elles constituent, par exemple, un moyen " -"populaire de contrôler les hyperparamètres côté client à partir du " +"Enfin, nous allons réviser notre logique *client* en modifiant " +":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " +"nous allons exclure les paramètres de normalisation des lots de la liste " +"des paramètres du modèle lors de l'envoi ou de la réception depuis le " "serveur." -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "Valeurs de configuration" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" msgstr "" -"Les valeurs de configuration sont représentées sous forme de dictionnaire" -" avec des clés `str`` et des valeurs de type `bool`, `bytes`, `double` " -"(float de précision 64 bits), `int`, ou `str` (ou des types équivalents " -"dans d'autres langages). Voici un exemple de dictionnaire de " -"configuration en Python :" +"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " +"d'exécution avant de le faire) et tu verras ton projet PyTorch " +"(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" +" FedBN sur deux clients. Félicitations !" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 +#: ../../source/tutorial-quickstart-jax.rst:283 +msgid "Next Steps" +msgstr "Prochaines étapes" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#, fuzzy msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -"Flower sérialise ces dictionnaires de configuration (ou *config dict* en " -"abrégé) dans leur représentation ProtoBuf, les transporte vers le client " -"à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." +"Le code source complet de cet exemple se trouve ici " +"`_. Notre exemple est bien sûr un peu trop " +"simplifié parce que les deux clients chargent exactement le même ensemble" +" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " +"approfondir ce sujet. 
Pourquoi ne pas utiliser différents sous-ensembles " +"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " +"?" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "Exemple : PyTorch - De la centralisation à la fédération" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -"Actuellement, il n'est pas possible d'envoyer directement des types de " -"collections (par exemple, ``Set``, ``List``, ``Map``) en tant que valeurs" -" dans les dictionnaires de configuration. Il existe plusieurs solutions " -"pour envoyer des collections en tant que valeurs en les convertissant en " -"l'un des types de valeurs pris en charge (et en les reconvertissant du " -"côté client)." +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail d'apprentissage automatique " +"existante. Nous utilisons PyTorch pour entraîner un réseau neuronal " +"convolutif sur l'ensemble de données CIFAR-10. Tout d'abord, nous " +"présentons cette tâche d'apprentissage automatique avec une approche " +"d'entraînement centralisée basée sur le tutoriel `Deep Learning with " +"PyTorch " +"`_. " +"Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " +"exécuter l'entraînement de manière fédérée." -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." -msgstr "" -"On peut, par exemple, convertir une liste de nombres à virgule flottante " -"en une chaîne JSON, puis envoyer la chaîne JSON à l'aide du dictionnaire " -"de configuration, et enfin reconvertir la chaîne JSON en une liste de " -"nombres à virgule flottante sur le client." - -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "Configuration par le biais de stratégies intégrées" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." +msgstr "" +"Nous commençons par une brève description du code d'entraînement CNN " +"centralisé. Si tu veux une explication plus approfondie de ce qui se " +"passe, jette un coup d'œil au tutoriel officiel `PyTorch " +"`_." 
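The removed ``how-to-configure-clients`` strings above describe the configuration dictionary (``str`` keys with ``bool``/``bytes``/``double``/``int``/``str`` values) and the JSON workaround for sending collection values. A minimal Python sketch of both, using illustrative key names that are not part of any Flower API::

    import json

    # Only bool, bytes, float, int, and str values are supported directly.
    config = {
        "batch_size": 32,
        "current_round": 3,
        "use_dropout": True,
    }

    # Workaround for a collection value: encode it as a JSON string here ...
    config["lr_schedule"] = json.dumps([0.1, 0.01, 0.001])

    # ... and decode it back into a list on the client side.
    lr_schedule = json.loads(config["lr_schedule"])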
-#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"Let's create a new file called :code:`cifar.py` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as :code:`torch` and :code:`torchvision`) need " +"to be imported. You can see that we do not import any package for " +"federated learning. You can keep all these imports as they are even when " +"we add the federated learning components at a later point." msgstr "" -"La façon la plus simple d'envoyer des valeurs de configuration aux " -"clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " -"stratégies intégrées prennent en charge ce que l'on appelle les fonctions" -" de configuration. Une fonction de configuration est une fonction que la " -"stratégie intégrée appelle pour obtenir le dictionnaire de configuration " -"pour le tour en cours. Elle transmet ensuite le dictionnaire de " -"configuration à tous les clients sélectionnés au cours de ce tour." +"Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " +"composants requis pour une formation traditionnelle (centralisée) sur le " +"CIFAR-10. Tout d'abord, tous les paquets requis (tels que :code:`torch` " +"et :code:`torchvision`) doivent être importés. Tu peux voir que nous " +"n'importons aucun paquet pour l'apprentissage fédéré. Tu peux conserver " +"toutes ces importations telles quelles même lorsque nous ajouterons les " +"composants d'apprentissage fédéré à un moment ultérieur." -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in :code:`class Net()`." msgstr "" -"Commençons par un exemple simple. Imaginons que nous voulions envoyer (a)" -" la taille du lot que le client doit utiliser, (b) le cycle global actuel" -" de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " -"client. Notre fonction de configuration pourrait ressembler à ceci :" +"Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" +" CIFAR-10 pour cette charge de travail d'apprentissage automatique. " +"L'architecture du modèle (un réseau neuronal convolutif très simple) est " +"définie dans :code:`class Net()`." 
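The ``how-to-configure-clients`` strings in this hunk also describe configuration functions for built-in strategies. A minimal sketch, assuming the documented ``on_fit_config_fn`` parameter of ``FedAvg`` and illustrative config keys::

    from flwr.server.strategy import FedAvg

    def fit_config(server_round: int) -> dict:
        """Return the config dict sent to the clients selected in this round."""
        return {
            "batch_size": 32,
            "current_round": server_round,
            "local_epochs": 2,
        }

    # The strategy calls fit_config every round and forwards the resulting
    # dict to all clients selected for training in that round.
    strategy = FedAvg(on_fit_config_fn=fit_config)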
-#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +"The :code:`load_data()` function loads the CIFAR-10 training and test " +"sets. The :code:`transform` normalized the data after loading." msgstr "" -"Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " -"la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" -" :code:`on_fit_config_fn` :" - -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" +"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" +" test CIFAR-10. La fonction :code:`transform` normalise les données après" +" leur chargement." -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"We now need to define the training (function :code:`train()`) which loops" +" over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." msgstr "" -"Il existe également une fonction `on_evaluate_config_fn` pour configurer " -"l'évaluation, qui fonctionne de la même manière. Ce sont des fonctions " -"séparées car on peut vouloir envoyer différentes valeurs de configuration" -" à `evaluate` (par exemple, pour utiliser une taille de lot différente)." +"Nous devons maintenant définir la formation (fonction :code:`train()`) " +"qui passe en boucle sur l'ensemble de la formation, mesure la perte, la " +"rétropropage, puis effectue une étape d'optimisation pour chaque lot " +"d'exemples de formation." -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"The evaluation of the model is defined in the function :code:`test()`. " +"The function loops over all test samples and measures the loss of the " +"model based on the test dataset." msgstr "" -"Les stratégies intégrées appellent cette fonction à chaque tour " -"(c'est-à-dire à chaque fois que `Strategy.configure_fit` ou " -"`Strategy.configure_evaluate` s'exécute). Appeler `on_evaluate_config_fn`" -" à chaque tour nous permet de varier/changer le dict de config au cours " -"de tours consécutifs. 
Si nous voulions mettre en place un calendrier " -"d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " -"locales au cours des derniers tours, nous pourrions faire ce qui suit :" - -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." - -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "Configuration des clients individuels" +"L'évaluation du modèle est définie dans la fonction :code:`test()`. La " +"fonction boucle sur tous les échantillons de test et mesure la perte du " +"modèle en fonction de l'ensemble des données de test." -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "" -"Dans certains cas, il est nécessaire d'envoyer des valeurs de " -"configuration différentes à des clients différents." +"Après avoir défini le chargement des données, l'architecture du modèle, " +"la formation et l'évaluation, nous pouvons tout mettre ensemble et former" +" notre CNN sur CIFAR-10." -#: ../../source/how-to-configure-clients.rst:89 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -"Ceci peut être réalisé en personnalisant une stratégie existante ou en " -"`mettant en œuvre une stratégie personnalisée à partir de zéro " -"`_. " -"Voici un exemple absurde qui personnalise :code:`FedAvg` en ajoutant une " -"paire clé/valeur de configuration personnalisée ``\"hello\" : \"world\"``" -" au config dict d'un *seul client* (uniquement le premier client de la " -"liste, les autres clients de cette série ne recevant pas cette valeur de " -"configuration \"spéciale\") :" - -#: ../../source/how-to-configure-logging.rst:2 -#, fuzzy -msgid "Configure logging" -msgstr "Configurer les clients" +"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " +"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" +" avons construit pour créer un simple système d'apprentissage fédéré " +"composé d'un serveur et de deux clients." -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. 
It presents information by default " -"following a standard message format:" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." msgstr "" +"Le projet simple d'apprentissage automatique discuté dans la section " +"précédente entraîne le modèle sur un seul ensemble de données (CIFAR-10)," +" nous appelons cela l'apprentissage centralisé. Ce concept " +"d'apprentissage centralisé, comme le montre la section précédente, est " +"probablement connu de la plupart d'entre vous, et beaucoup d'entre vous " +"l'ont déjà utilisé. Normalement, si tu veux exécuter des charges de " +"travail d'apprentissage automatique de manière fédérée, tu dois alors " +"changer la plupart de ton code et tout mettre en place à partir de zéro, " +"ce qui peut représenter un effort considérable." -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." msgstr "" +"Cependant, avec Flower, tu peux faire évoluer ton code préexistant vers " +"une configuration d'apprentissage fédéré sans avoir besoin d'une " +"réécriture majeure." -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +msgid "" +"The concept is easy to understand. We have to start a *server* and then " +"use the code in :code:`cifar.py` for the *clients* that are connected to " +"the *server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" +"Le concept est facile à comprendre. Nous devons démarrer un *serveur* et " +"utiliser le code dans :code:`cifar.py` pour les *clients* qui sont " +"connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " +"clients. Les *clients* exécutent la formation et mettent à jour les " +"paramètres. Les paramètres mis à jour sont renvoyés au *serveur* qui fait" +" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " +"un tour du processus d'apprentissage fédéré et nous répétons cette " +"opération pour plusieurs tours." -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 +#: ../../source/tutorial-quickstart-jax.rst:129 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. 
This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"Our example consists of one *server* and two *clients*. Let's set up " +":code:`server.py` first. The *server* needs to import the Flower package " +":code:`flwr`. Next, we use the :code:`start_server` function to start a " +"server and tell it to perform three rounds of federated learning." msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " +"configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" +" :code:`flwr`. Ensuite, nous utilisons la fonction :code:`start_server` " +"pour démarrer un serveur et lui demander d'effectuer trois cycles " +"d'apprentissage fédéré." -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 +#: ../../source/tutorial-quickstart-jax.rst:139 +msgid "We can already start the *server*:" +msgstr "Nous pouvons déjà démarrer le *serveur* :" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined centralized training in :code:`cifar.py`. " +"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " +"update the parameters on our PyTorch model:" msgstr "" -"Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " -"terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " -"depuis lequel le code est exécuté. Si nous inspectons nous voyons que le " -"log ci-dessous est également enregistré mais préfixé avec " -":code:`identifier` sur chaque ligne :" - -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "Loggez vos propres messages" +"Enfin, nous allons définir notre logique *client* dans :code:`client.py` " +"et nous appuyer sur la formation centralisée définie précédemment dans " +":code:`cifar.py`. Notre *client* doit importer :code:`flwr`, mais aussi " +":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. 
" +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" msgstr "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. " +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +msgid ":code:`set_parameters`" +msgstr ":code:`set_parameters`" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +#: ../../source/tutorial-quickstart-jax.rst:166 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "" +"set the model parameters on the local model that are received from the " +"server" +msgstr "règle les paramètres du modèle local reçus du serveur" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 +#: ../../source/tutorial-quickstart-jax.rst:168 +msgid "" +"loop over the list of model parameters received as NumPy " +":code:`ndarray`'s (think list of neural network layers)" msgstr "" +"boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " +":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#: ../../source/tutorial-quickstart-jax.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid ":code:`get_parameters`" +msgstr ":code:`get_parameters`" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 +#: ../../source/tutorial-quickstart-jax.rst:170 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." 
+"get the model parameters and return them as a list of NumPy " +":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" msgstr "" +"récupère les paramètres du modèle et les renvoie sous forme de liste de " +":code:`ndarray` NumPy (ce qui correspond à ce que " +":code:`flwr.client.NumPyClient` attend)" -#: ../../source/how-to-enable-ssl-connections.rst:2 -#, fuzzy -msgid "Enable SSL connections" -msgstr "Collecte centralisée des données" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid ":code:`fit`" +msgstr ":code:`fit`" -#: ../../source/how-to-enable-ssl-connections.rst:4 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:172 +#: ../../source/tutorial-quickstart-jax.rst:176 msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +"update the parameters of the local model with the parameters received " +"from the server" msgstr "" -"Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " -"comment un client Flower peut établir une connexion sécurisée avec lui." +"mettre à jour les paramètres du modèle local avec les paramètres reçus du" +" serveur" -#: ../../source/how-to-enable-ssl-connections.rst:7 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +msgid "train the model on the local training set" +msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +msgid "get the updated local model weights and return them to the server" +msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid ":code:`evaluate`" +msgstr ":code:`évaluer`" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 +#: ../../source/tutorial-quickstart-jax.rst:177 +msgid "evaluate the updated model on the local test set" +msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +msgid "return the local loss and accuracy to the server" +msgstr "renvoie la perte locale et la précision au serveur" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`test()` previously " +"defined in :code:`cifar.py`. So what we really do here is we tell Flower " +"through our :code:`NumPyClient` subclass which of our already defined " +"functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." msgstr "" -"Un exemple de code complet démontrant une connexion sécurisée peut être " -"trouvé ici `_." 
+"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " +"utilisent les fonctions :code:`train()` et :code:`test()` définies " +"précédemment dans :code:`cifar.py`. Ce que nous faisons vraiment ici, " +"c'est que nous indiquons à Flower, par le biais de notre sous-classe " +":code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " +"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " +"annotations de type pour te donner une meilleure compréhension des types " +"de données qui sont transmis." -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 #, fuzzy msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." +"All that's left to do it to define a function that loads both model and " +"data, creates a :code:`CifarClient`, and starts this client. You load " +"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " +"with the function :code:`fl.client.start_client()` by pointing it at the " +"same IP address we used in :code:`server.py`:" msgstr "" -"L'exemple de code est accompagné d'un fichier README.md qui t'expliquera " -"comment le démarrer. Bien qu'il soit déjà activé par SSL, il peut être " -"moins descriptif sur la façon de procéder. Tiens-toi en à ce guide pour " -"une introduction plus approfondie sur le sujet." - -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "Certificats" +"Il ne reste plus qu'à définir une fonction qui charge le modèle et les " +"données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" +" données et ton modèle en utilisant :code:`cifar.py`. Démarre " +":code:`CifarClient` avec la fonction :code:`fl.client.start_client()` en " +"la faisant pointer sur la même adresse IP que celle que nous avons " +"utilisée dans :code:`server.py` :" -#: ../../source/how-to-enable-ssl-connections.rst:18 -#, fuzzy -msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 +#: ../../source/tutorial-quickstart-jax.rst:274 +msgid "And that's it. You can now open two additional terminal windows and run" msgstr "" -"L'utilisation de connexions compatibles avec le protocole SSL nécessite " -"que des certificats soient transmis au serveur et au client. Pour les " -"besoins de ce guide, nous allons générer des certificats auto-signés. " -"Comme cela peut devenir assez complexe, nous allons te demander " -"d'exécuter le script dans :code:`examples/advanced-" -"tensorflow/certificates/generate.sh`" +"Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " +"les commandes suivantes" -#: ../../source/how-to-enable-ssl-connections.rst:29 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." 
+"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" -"Cela générera les certificats dans :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"dans chaque fenêtre (assure-toi que le serveur fonctionne avant de le " +"faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " +"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 #, fuzzy msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" msgstr "" -"L'approche de la génération des certificats SSL dans cet exemple peut " -"servir d'inspiration et de point de départ, mais ne doit pas être " -"considérée comme complète pour les environnements de production." +"Le code source complet de cet exemple : `PyTorch : From Centralized To " +"Federated (Code) `_. Notre exemple est, bien sûr, " +"un peu trop simplifié parce que les deux clients chargent exactement le " +"même ensemble de données, ce qui n'est pas réaliste. Tu es maintenant " +"prêt à explorer davantage ce sujet. Pourquoi ne pas utiliser différents " +"sous-ensembles de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter " +"d'autres clients ?" -#: ../../source/how-to-enable-ssl-connections.rst:39 +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 #, fuzzy -msgid "Server (SuperLink)" -msgstr "flower-superlink" +msgid "Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-enable-ssl-connections.rst:41 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -"Nous allons maintenant montrer comment écrire un client qui utilise les " -"scripts générés précédemment :" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." 
+"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:54 -#, fuzzy -msgid "Client (SuperNode)" -msgstr "Codes d'état du client." - -#: ../../source/how-to-enable-ssl-connections.rst:56 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:12 msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"Nous allons maintenant montrer comment écrire un client qui utilise les " -"scripts générés précédemment :" -#: ../../source/how-to-enable-ssl-connections.rst:64 -#, fuzzy -msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -"En définissant :code:`root_certificates`, le client s'attend à recevoir " -"les certificats racine codés en PEM sous forme de chaîne d'octets. Nous " -"utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " -"certificats sous forme de chaînes d'octets." -#: ../../source/how-to-enable-ssl-connections.rst:70 -#, fuzzy +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -"Tu devrais maintenant avoir appris à générer des certificats auto-signés " -"à l'aide du script donné, à démarrer un serveur compatible SSL et à " -"demander à un client d'établir une connexion sécurisée avec lui." -#: ../../source/how-to-enable-ssl-connections.rst:75 +#: ../../source/explanation-differential-privacy.rst:25 #, fuzzy -msgid "Additional resources" -msgstr "Ressources supplémentaires" +msgid "Formal Definition" +msgstr "Compiler les définitions ProtoBuf" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/explanation-differential-privacy.rst:26 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. 
A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -"Ces sources supplémentaires peuvent être pertinentes si tu souhaites " -"approfondir le sujet des certificats :" -#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" -msgstr "`Let's Encrypt `_" +#: ../../source/explanation-differential-privacy.rst:32 +msgid "" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" -msgstr "`certbot `_" +#: ../../source/explanation-differential-privacy.rst:38 +msgid "" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." +msgstr "" -#: ../../source/how-to-implement-strategies.rst:2 +#: ../../source/explanation-differential-privacy.rst:45 #, fuzzy -msgid "Implement strategies" -msgstr "Mettre en place des stratégies" +msgid "Differential Privacy in Machine Learning" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -"L'abstraction de la stratégie permet de mettre en œuvre des stratégies " -"entièrement personnalisées. Une stratégie est essentiellement " -"l'algorithme d'apprentissage fédéré qui s'exécute sur le serveur. Les " -"stratégies décident comment échantillonner les clients, comment " -"configurer les clients pour la formation, comment agréger les mises à " -"jour et comment évaluer les modèles. Flower fournit quelques stratégies " -"intégrées qui sont basées sur la même API que celle décrite ci-dessous." 
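To make the (epsilon, delta) guarantee defined above concrete, here is a minimal, illustrative Python sketch of the Gaussian mechanism applied to a single numeric query (a mean over records assumed to lie in [0, 1]). The sensitivity, epsilon and delta values are assumptions chosen only for the example; nothing here is part of the Flower API or of the documentation strings being translated in this catalog.

    import math
    import numpy as np

    def gaussian_mechanism(value, sensitivity, epsilon, delta):
        # Standard calibration: sigma = sensitivity * sqrt(2 * ln(1.25 / delta)) / epsilon
        sigma = sensitivity * math.sqrt(2 * math.log(1.25 / delta)) / epsilon
        return value + np.random.normal(0.0, sigma)

    # Records bounded in [0, 1], so adding/removing one of n records
    # changes the mean by at most 1/n (the sensitivity of the query).
    incomes = np.random.rand(1000)
    sensitivity = 1.0 / len(incomes)
    private_mean = gaussian_mechanism(incomes.mean(), sensitivity, epsilon=1.0, delta=1e-5)

With a lower epsilon the noise scale grows, which is the privacy-utility trade-off described in the surrounding text.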
-#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr "L'abstraction :code:`Stratégie`" +#: ../../source/explanation-differential-privacy.rst:53 +#, fuzzy +msgid "Differential Privacy in Federated Learning" +msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -"Toutes les implémentations de stratégies sont dérivées de la classe de " -"base abstraite :code:`flwr.server.strategy.Strategy`, qu'il s'agisse " -"d'implémentations intégrées ou d'implémentations tierces. Cela signifie " -"que les implémentations de stratégies personnalisées ont exactement les " -"mêmes capacités à leur disposition que les implémentations intégrées." -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -"L'abstraction de la stratégie définit quelques méthodes abstraites qui " -"doivent être mises en œuvre :" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/explanation-differential-privacy.rst:60 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -"La création d'une nouvelle stratégie implique la mise en œuvre d'une " -"nouvelle :code:`classe` (dérivée de la classe de base abstraite " -":code:`Stratégie`) qui met en œuvre les méthodes abstraites présentées " -"précédemment :" -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Le serveur Flower appelle ces méthodes dans l'ordre suivant :" +#: ../../source/explanation-differential-privacy.rst:63 +msgid "" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." +msgstr "" -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." 
-msgstr "Les sections suivantes décrivent chacune de ces méthodes plus en détail." +#: ../../source/explanation-differential-privacy.rst:65 +msgid "" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." +msgstr "" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr "La méthode :code:`initialize_parameters` (initialisation des paramètres)" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 +#, fuzzy +msgid "Central Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -":code:`initialize_parameters` n'est appelé qu'une seule fois, au tout " -"début d'une exécution. Il est chargé de fournir les paramètres initiaux " -"du modèle global sous une forme sérialisée (c'est-à-dire sous la forme " -"d'un objet :code:`Parameters`)." -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -"Les stratégies intégrées renvoient les paramètres initiaux fournis par " -"l'utilisateur. L'exemple suivant montre comment les paramètres initiaux " -"peuvent être transmis à :code:`FedAvg` :" -#: ../../source/how-to-implement-strategies.rst:209 -msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. 
In practice, it is recommended to always use " -"server-side parameter initialization." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -"Le serveur Flower appelle :code:`initialize_parameters`, qui renvoie les " -"paramètres passés à :code:`initial_parameters`, ou :code:`None`. Si aucun" -" paramètre n'est renvoyé par :code:`initialize_parameters` (c'est-à-dire " -":code:`None`), le serveur sélectionne au hasard un client et lui demande " -"de fournir ses paramètres. Il s'agit d'une fonction de commodité qui " -"n'est pas recommandée dans la pratique, mais qui peut être utile pour le " -"prototypage. Dans la pratique, il est recommandé de toujours utiliser " -"l'initialisation des paramètres du côté du serveur." -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -"L'initialisation des paramètres côté serveur est un mécanisme puissant. " -"Elle peut être utilisée, par exemple, pour reprendre l'entraînement à " -"partir d'un point de contrôle précédemment sauvegardé. C'est également la" -" capacité fondamentale nécessaire pour mettre en œuvre des approches " -"hybrides, par exemple, pour affiner un modèle pré-entraîné à l'aide de " -"l'apprentissage fédéré." -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr "La méthode :code:`configure_fit`" +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" +msgstr "" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -":code:`configure_fit` est chargé de configurer le prochain tour de " -"formation. Que signifie *configurer* dans ce contexte ? Configurer un " -"tour signifie sélectionner des clients et décider des instructions à leur" -" envoyer. La signature de :code:`configure_fit` l'indique clairement :" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." 
msgstr "" -"La valeur de retour est une liste de tuples, chacun représentant les " -"instructions qui seront envoyées à un client particulier. Les " -"implémentations de stratégies effectuent généralement les étapes " -"suivantes dans :code:`configure_fit` :" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"Utilise le :code:`client_manager` pour échantillonner au hasard tous les " -"clients disponibles (ou un sous-ensemble d'entre eux) (chacun représenté " -"par un objet :code:`ClientProxy`)" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -"Associe chaque :code:`ClientProxy` au même :code:`FitIns` contenant le " -"modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 #, fuzzy +msgid "Local Differential Privacy" +msgstr "Confidentialité différentielle" + +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"Les implémentations plus sophistiquées peuvent utiliser " -":code:`configure_fit` pour mettre en œuvre une logique de sélection des " -"clients personnalisée. Un client ne participera à un tour que si le " -":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " -":code:`configure_fit`." -#: ../../source/how-to-implement-strategies.rst:240 -msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -"La structure de cette valeur de retour offre beaucoup de souplesse à " -"l'utilisateur. 
Comme les instructions sont définies par client, des " -"instructions différentes peuvent être envoyées à chaque client, ce qui " -"permet d'élaborer des stratégies personnalisées pour former, par exemple," -" différents modèles sur différents clients, ou utiliser différents " -"hyperparamètres sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr "La méthode :code:`aggregate_fit` (agrégation)" +#: ../../source/explanation-differential-privacy.rst:118 +msgid "" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" +msgstr "" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -":code:`aggregate_fit` est chargé d'agréger les résultats renvoyés par les" -" clients qui ont été sélectionnés et à qui on a demandé de s'entraîner " -"dans :code:`configure_fit`." -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " -"que le serveur obtienne des résultats de tous les clients auxquels il a " -"envoyé des instructions (via :code:`configure_fit`). " -":code:`aggregate_fit` reçoit donc une liste de :code:`résultats`, mais " -"aussi une liste de :code:`échecs`." -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -":code:`aggregate_fit` renvoie un objet :code:`Parameters` facultatif et " -"un dictionnaire de métriques agrégées. La valeur de retour " -":code:`Parameters` est facultative car :code:`aggregate_fit` peut décider" -" que les résultats fournis ne sont pas suffisants pour l'agrégation (par " -"exemple, trop d'échecs)." 
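As a concrete illustration of the local DP mechanism described above, the sketch below clips a client's model update to an assumed sensitivity and adds Gaussian noise with sigma = delta_sensitivity * sqrt(2 * ln(1.25 / delta)) / epsilon before the update would be sent to the server. The function and parameter names are hypothetical and do not correspond to a built-in Flower API.

    import math
    import numpy as np

    def privatize_update(update, clip_norm, epsilon, delta):
        # Clip the overall L2 norm of the update to the assumed sensitivity `clip_norm`.
        total_norm = math.sqrt(sum(float(np.sum(u ** 2)) for u in update))
        scale = min(1.0, clip_norm / (total_norm + 1e-12))
        clipped = [u * scale for u in update]
        # Calibrate Gaussian noise: sigma = clip_norm * sqrt(2 * ln(1.25 / delta)) / epsilon
        sigma = clip_norm * math.sqrt(2 * math.log(1.25 / delta)) / epsilon
        return [c + np.random.normal(0.0, sigma, size=c.shape) for c in clipped]

    # Example: a fake two-layer update, privatized before being returned to the server.
    fake_update = [np.random.randn(3, 3), np.random.randn(3)]
    noised_update = privatize_update(fake_update, clip_norm=1.0, epsilon=1.0, delta=1e-5)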
-#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr "La méthode :code:`configure_evaluate` (en anglais)" +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy +msgid "**References:**" +msgstr "Référence" -#: ../../source/how-to-implement-strategies.rst:265 -msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -":code:`configure_evaluate` est chargé de configurer le prochain tour " -"d'évaluation. Que signifie *configurer* dans ce contexte ? Configurer un " -"tour signifie sélectionner des clients et décider des instructions à leur" -" envoyer. La signature de :code:`configure_evaluate` l'indique clairement" -" :" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/explanation-differential-privacy.rst:135 +#, fuzzy msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -"La valeur de retour est une liste de tuples, chacun représentant les " -"instructions qui seront envoyées à un client particulier. Les " -"implémentations de stratégies effectuent généralement les étapes " -"suivantes dans :code:`configure_evaluate` :" +"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " +"language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." msgstr "" -"Associe chaque :code:`ClientProxy` au même :code:`EvaluateIns` contenant " -"le modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/explanation-differential-privacy.rst:139 #, fuzzy -msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" -"Les implémentations plus sophistiquées peuvent utiliser " -":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " -"des clients personnalisée. Un client ne participera à un tour que si le " -":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " -":code:`configure_evaluate`." +"Andrew, Galen, et al. \"Differentially private learning with adaptive " +"clipping\" Advances in Neural Information Processing Systems 34 (2021) : " +"17455-17466." 
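For the central DP approach described in the section above (and in [2] and [3]), a minimal server-side sketch is shown below: each received client update is clipped to an assumed L2 norm `S`, the clipped updates are averaged, and Gaussian noise with sigma = noise_scale * S / (number of sampled clients) is added to the aggregate. Names and values are illustrative only; this is not Flower's built-in DP tooling.

    import numpy as np

    def clip_to_norm(update, s):
        # Scale the update down so its L2 norm is at most `s`.
        norm = float(np.linalg.norm(update))
        return update * min(1.0, s / (norm + 1e-12))

    def dp_aggregate(client_updates, s, noise_scale):
        clipped = [clip_to_norm(u, s) for u in client_updates]
        mean_update = np.mean(clipped, axis=0)
        # Noise scaled to the sensitivity S and the number of sampled clients.
        sigma = noise_scale * s / len(client_updates)
        return mean_update + np.random.normal(0.0, sigma, size=mean_update.shape)

    # Example: aggregate three fake client updates of the same shape.
    updates = [np.random.randn(10) for _ in range(3)]
    noised_global_update = dp_aggregate(updates, s=1.0, noise_scale=1.0)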
-#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "Évaluation fédérée" + +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" -"La structure de cette valeur de retour offre beaucoup de souplesse à " -"l'utilisateur. Comme les instructions sont définies par client, des " -"instructions différentes peuvent être envoyées à chaque client. Cela " -"permet aux stratégies personnalisées d'évaluer, par exemple, différents " -"modèles sur différents clients, ou d'utiliser différents hyperparamètres " -"sur différents clients (via le dict :code:`config`)." +"Il existe deux approches principales pour évaluer les modèles dans les " +"systèmes d'apprentissage fédérés : l'évaluation centralisée (ou côté " +"serveur) et l'évaluation fédérée (ou côté client)." -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr "La méthode :code:`aggregate_evaluate` (agréger_évaluer)" +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "Évaluation centralisée" -#: ../../source/how-to-implement-strategies.rst:293 -msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." -msgstr "" -":code:`aggregate_evaluate` est chargé d'agréger les résultats renvoyés " -"par les clients qui ont été sélectionnés et à qui l'on a demandé " -"d'évaluer dans :code:`configure_evaluate`." +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "Stratégies intégrées" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " -"que le serveur obtienne des résultats de tous les clients auxquels il a " -"envoyé des instructions (via :code:`configure_evaluate`). " -":code:`aggregate_evaluate` reçoit donc une liste de :code:`résultats`, " -"mais aussi une liste d' :code:`échecs`." +"Toutes les stratégies intégrées prennent en charge l'évaluation " +"centralisée en fournissant une fonction d'évaluation lors de " +"l'initialisation. 
Une fonction d'évaluation est une fonction qui peut " +"prendre les paramètres du modèle global actuel comme entrée et renvoyer " +"les résultats de l'évaluation :" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/explanation-federated-evaluation.rst:58 +msgid "Custom Strategies" +msgstr "Stratégies personnalisées" + +#: ../../source/explanation-federated-evaluation.rst:60 msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +"The :code:`Strategy` abstraction provides a method called " +":code:`evaluate` that can directly be used to evaluate the current global" +" model parameters. The current server implementation calls " +":code:`evaluate` after parameter aggregation and before federated " +"evaluation (see next paragraph)." msgstr "" -":code:`aggregate_evaluate` renvoie un :code:`float` facultatif (perte) et" -" un dictionnaire de mesures agrégées. La valeur de retour :code:`float` " -"est facultative car :code:`aggregate_evaluate` peut décider que les " -"résultats fournis ne sont pas suffisants pour l'agrégation (par exemple, " -"trop d'échecs)." +"L'abstraction :code:`Strategy` fournit une méthode appelée " +":code:`evaluate` qui peut être directement utilisée pour évaluer les " +"paramètres du modèle global actuel. L'implémentation actuelle du serveur " +"appelle :code:`evaluate` après l'agrégation des paramètres et avant " +"l'évaluation fédérée (voir le paragraphe suivant)." -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr "La méthode :code:`évaluer`" +#: ../../source/explanation-federated-evaluation.rst:65 +msgid "Federated Evaluation" +msgstr "Évaluation fédérée" -#: ../../source/how-to-implement-strategies.rst:313 -msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." -msgstr "" -"le fait d'avoir :code:`evaluate` en plus de " -":code:`configure_evaluate`/:code:`aggregate_evaluate` permet aux " -"stratégies d'effectuer des évaluations à la fois côté serveur et côté " -"client (fédéré)." +#: ../../source/explanation-federated-evaluation.rst:68 +msgid "Implementing Federated Evaluation" +msgstr "Mise en œuvre de l'évaluation fédérée" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"Client-side evaluation happens in the :code:`Client.evaluate` method and " +"can be configured from the server side." msgstr "" -"La valeur de retour est à nouveau facultative parce que la stratégie peut" -" ne pas avoir besoin de mettre en œuvre l'évaluation côté serveur ou " -"parce que la méthode :code:`evaluate` définie par l'utilisateur peut ne " -"pas se terminer avec succès (par exemple, elle peut échouer à charger les" -" données de l'évaluation côté serveur)." 
- -#: ../../source/how-to-install-flower.rst:2 -#, fuzzy -msgid "Install Flower" -msgstr "Installer Flower" +"L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " +"et peut être configurée côté serveur." -#: ../../source/how-to-install-flower.rst:6 -#, fuzzy -msgid "Python version" -msgstr "Version Python" +#: ../../source/explanation-federated-evaluation.rst:101 +msgid "Configuring Federated Evaluation" +msgstr "Configuration de l'évaluation fédérée" -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "Installe la version stable" +#: ../../source/explanation-federated-evaluation.rst:103 +msgid "" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "" +"L'évaluation fédérée peut être configurée du côté du serveur. Les " +"stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" +#: ../../source/explanation-federated-evaluation.rst:105 +msgid "" +":code:`fraction_evaluate`: a :code:`float` defining the fraction of " +"clients that will be selected for evaluation. If " +":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " +"are connected to the server, then :code:`10` will be randomly selected " +"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " +"federated evaluation will be disabled." msgstr "" +":code:`fraction_evaluate` : un :code:`float` définissant la fraction de " +"clients qui sera sélectionnée pour l'évaluation. Si " +":code:`fraction_evaluate` est défini à :code:`0.1` et que :code:`100` " +"clients sont connectés au serveur, alors :code:`10` sera sélectionné " +"aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " +"à :code:`0.0`, l'évaluation fédérée sera désactivée." -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/explanation-federated-evaluation.rst:106 msgid "" -"Stable releases are available on `PyPI " -"`_::" +":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " +"clients to be selected for evaluation. If :code:`fraction_evaluate` is " +"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " +":code:`100` clients are connected to the server, then :code:`20` clients " +"will be selected for evaluation." msgstr "" -"Les versions stables sont disponibles sur `PyPI " -"`_: :" +"si :code:`fraction_evaluate` est réglé sur :code:`0.1`, " +":code:`min_evaluate_clients` est réglé sur 20, et que :code:`100` clients" +" sont connectés au serveur, alors :code:`20` clients seront sélectionnés " +"pour l'évaluation." -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/explanation-federated-evaluation.rst:107 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +":code:`min_available_clients`: an :code:`int` that defines the minimum " +"number of clients which need to be connected to the server before a round" +" of federated evaluation can start. If fewer than " +":code:`min_available_clients` are connected to the server, the server " +"will wait until more clients are connected before it continues to sample " +"clients for evaluation." 
msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr`` " -"doit être installé avec l'option ``simulation``: :" +":code:`min_available_clients` : un :code:`int` qui définit le nombre " +"minimum de clients qui doivent être connectés au serveur avant qu'un " +"cycle d'évaluation fédérée puisse commencer. Si moins de " +":code:`min_available_clients` sont connectés au serveur, le serveur " +"attendra que d'autres clients soient connectés avant de continuer à " +"échantillonner des clients pour l'évaluation." -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "" +":code:`on_evaluate_config_fn`: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" +":code:`on_evaluate_config_fn` : une fonction qui renvoie un dictionnaire " +"de configuration qui sera envoyé aux clients sélectionnés. Cette fonction" +" sera appelée à chaque tour et offre un moyen pratique de personnaliser " +"l'évaluation côté client depuis le côté serveur, par exemple pour " +"configurer le nombre d'étapes de validation effectuées." -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." +#: ../../source/explanation-federated-evaluation.rst:135 +msgid "Evaluating Local Model Updates During Training" +msgstr "Évaluer les mises à jour du modèle local pendant la formation" + +#: ../../source/explanation-federated-evaluation.rst:137 +msgid "" +"Model parameters can also be evaluated during training. " +":code:`Client.fit` can return arbitrary evaluation results as a " +"dictionary:" msgstr "" +"Les paramètres du modèle peuvent également être évalués pendant la " +"formation. :code:`Client.fit` peut renvoyer des résultats d'évaluation " +"arbitraires sous forme de dictionnaire :" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/explanation-federated-evaluation.rst:177 +msgid "Full Code Example" +msgstr "Exemple de code complet" + +#: ../../source/explanation-federated-evaluation.rst:179 +#, fuzzy msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" +"Pour un exemple de code complet qui utilise à la fois l'évaluation " +"centralisée et fédérée, voir l'*Exemple TensorFlow avancé* (la même " +"approche peut être appliquée aux charges de travail mises en œuvre dans " +"n'importe quel autre framework) : " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." 
msgstr "" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/explanation-flower-architecture.rst:3 +msgid "Flower Architecture" +msgstr "Architecture florale" + +#: ../../source/explanation-flower-architecture.rst:5 +msgid "" +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "Vérifie l'installation" +#: ../../source/explanation-flower-architecture.rst:8 +msgid "" +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." +msgstr "" -#: ../../source/how-to-install-flower.rst:48 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:12 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -"La commande suivante peut être utilisée pour vérifier si Flower a été " -"installé avec succès. Si tout a fonctionné, la version de Flower devrait " -"être imprimée sur la ligne de commande: :" -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "Options d'installation avancées" +#: ../../source/explanation-flower-architecture.rst:16 +msgid "This is sometimes called a hub-and-spoke topology:" +msgstr "" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/explanation-flower-architecture.rst:24 #, fuzzy -msgid "Install via Docker" -msgstr "Installer Flower" +msgid "Hub-and-spoke topology in federated learning" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/explanation-flower-architecture.rst:24 +msgid "" +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "Installer la version pre-release" - -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/explanation-flower-architecture.rst:26 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -"Les nouvelles versions (éventuellement instables) de Flower sont parfois " -"disponibles en tant que versions préliminaires (alpha, bêta, release " -"candidate) avant que la version stable n'arrive : :" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/explanation-flower-architecture.rst:31 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +"This is why, in Flower, both the server side and the client side are " +"split into two parts. 
One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, les " -"versions de ``flwr`` doivent être installées avec l'option " -"``simulation``: :" -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "Installer la version nightly" +#: ../../source/explanation-flower-architecture.rst:36 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" +msgstr "" -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/explanation-flower-architecture.rst:38 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." msgstr "" -"Les dernières modifications (potentiellement instables) de Flower sont " -"disponibles sous forme de versions nocturnes: :" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-flower-architecture.rst:41 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"``ServerApp``: a short-lived process with project-specific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" -"nightly`` doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-monitor-simulation.rst:2 -#, fuzzy -msgid "Monitor simulation" -msgstr "Simulation de moniteur" +#: ../../source/explanation-flower-architecture.rst:47 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-flower-architecture.rst:49 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." -msgstr "" -"Flower te permet de surveiller les ressources du système pendant " -"l'exécution de ta simulation. De plus, le moteur de simulation de Flower " -"est puissant et te permet de décider comment allouer les ressources par " -"manière de client et de limiter l'utilisation totale. Les informations " -"sur la consommation des ressources peuvent t'aider à prendre des " -"décisions plus intelligentes et à accélérer le temps d'exécution." - -#: ../../source/how-to-monitor-simulation.rst:6 -msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -"Les instructions spécifiques supposent que tu utilises macOS et que le " -"gestionnaire de paquets `Homebrew `_ est installé."
- -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "Téléchargements" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/explanation-flower-architecture.rst:53 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -"`Prometheus `_ est utilisé pour la collecte de " -"données, tandis que `Grafana `_ te permettra de " -"visualiser les données collectées. Ils sont tous deux bien intégrés à " -"`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/explanation-flower-architecture.rst:59 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -"Écrase les fichiers de configuration (selon ton appareil, il se peut " -"qu'il soit installé sur un chemin différent)." -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "Si tu es sur un Mac M1, il devrait l'être :" +#: ../../source/explanation-flower-architecture.rst:71 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Architecture florale" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "" -"Sur les appareils Mac Intel de la génération précédente, ce devrait être " -"le cas :" +#: ../../source/explanation-flower-architecture.rst:71 +#, fuzzy +msgid "The basic Flower architecture for federated learning." +msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -"Ouvre les fichiers de configuration respectifs et modifie-les. Selon ton " -"appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-flower-architecture.rst:79 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -"puis supprime tout le texte du fichier et colle une nouvelle " -"configuration Prometheus que tu vois ci-dessous. 
Tu peux adapter les " -"intervalles de temps à tes besoins :" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-flower-architecture.rst:82 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -"Maintenant, après avoir édité la configuration de Prometheus, fais de " -"même avec les fichiers de configuration de Grafana. Ouvre ces derniers à " -"l'aide de l'une des commandes suivantes, comme précédemment :" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-flower-architecture.rst:87 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -"Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " -"configuration suivante comme précédemment." -#: ../../source/how-to-monitor-simulation.rst:84 -msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "" -"Félicitations, tu viens de télécharger tous les logiciels nécessaires au " -"suivi des métriques, maintenant, démarrons-le." +#: ../../source/explanation-flower-architecture.rst:97 +#, fuzzy +msgid "Multi-tenancy federated learning architecture" +msgstr "Stratégie de moyenne fédérée." -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "Suivi des mesures" +#: ../../source/explanation-flower-architecture.rst:97 +#, fuzzy +msgid "Multi-tenancy federated learning architecture with Flower" +msgstr "Étape 2 : Apprentissage fédéré avec Flower" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-flower-architecture.rst:99 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -"Avant de lancer ta simulation Flower, tu dois démarrer les outils de " -"surveillance que tu viens d'installer et de configurer." -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-flower-architecture.rst:104 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -"Tu dois inclure l'argument suivant dans ton code Python lorsque tu " -"démarres une simulation." -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." -msgstr "Maintenant, tu es prêt à commencer ta charge de travail." 
+#: ../../source/explanation-flower-architecture.rst:113 +#, fuzzy +msgid "Multi-tenancy federated learning architecture - Run 1" +msgstr "Stratégie de moyenne fédérée." -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-flower-architecture.rst:113 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." msgstr "" -"Peu de temps après le début de la simulation, tu devrais voir les " -"journaux suivants dans ton terminal :" - -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "Tu peux tout regarder sur ``_ ." -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/explanation-flower-architecture.rst:116 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -"Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" -" panneau de gauche, l'option la plus basse)." -#: ../../source/how-to-monitor-simulation.rst:121 -msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." -msgstr "" -"Ou alors, tu peux simplement les voir dans Grafana en cliquant sur le " -"coin supérieur droit, \"View in Grafana\". Sache que le tableau de bord " -"Ray n'est accessible que pendant la simulation. Une fois la simulation " -"terminée, tu ne peux utiliser Grafana que pour explorer les métriques. Tu" -" peux démarrer Grafana en te rendant sur `http://localhost:3000/``." +#: ../../source/explanation-flower-architecture.rst:125 +#, fuzzy +msgid "Multi-tenancy federated learning architecture - Run 2" +msgstr "Stratégie de moyenne fédérée." -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-flower-architecture.rst:125 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." msgstr "" -"Après avoir terminé la visualisation, arrête Prometheus et Grafana. C'est" -" important car sinon ils bloqueront, par exemple, le port :code:`3000` " -"sur ta machine tant qu'ils seront en cours d'exécution." - -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "Allocation des ressources" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/explanation-flower-architecture.rst:129 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." 
msgstr "" -"Tu dois comprendre le fonctionnement de la bibliothèque Ray pour allouer " -"efficacement les ressources du système aux clients de simulation de ton " -"côté." -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-flower-architecture.rst:132 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -"Au départ, la simulation (que Ray gère sous le capot) démarre par défaut " -"avec toutes les ressources disponibles sur le système, qu'elle partage " -"entre les clients. Cela ne signifie pas qu'elle les divise de manière " -"égale entre tous, ni que l'apprentissage du modèle se fait sur tous les " -"clients simultanément. Tu en apprendras plus à ce sujet dans la suite de " -"ce blog. Tu peux vérifier les ressources du système en exécutant ce qui " -"suit :" - -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -"Cependant, tu peux écraser les valeurs par défaut. Lorsque tu démarres " -"une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " -"toutes) :" - -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "Spécifions également la ressource pour un seul client." -#: ../../source/how-to-monitor-simulation.rst:205 -msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +#: ../../source/explanation-flower-architecture.rst:151 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -"Ray ne démarrera un nouveau client que lorsqu'il disposera de toutes les " -"ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" -" lorsque les ressources le permettront." -#: ../../source/how-to-monitor-simulation.rst:207 -msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. 
Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +#: ../../source/explanation-flower-architecture.rst:151 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -"Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " -"ne fonctionneront pas simultanément. En définissant " -":code:`client_num_gpus = 0.5`, tu pourras exécuter deux clients et donc " -"les faire fonctionner simultanément. Fais attention à ne pas demander " -"plus de ressources que celles disponibles. Si tu as spécifié " -":code:`client_num_gpus = 2`, la simulation ne démarrera pas (même si tu " -"as 2 GPU mais que tu as décidé d'en définir 1 dans " -":code:`ray_init_args`)." - -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "FAQ" - -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "Q : Je ne vois aucune mesure enregistrée." -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-flower-architecture.rst:156 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -"R : Il se peut que le délai ne soit pas correctement défini. Le paramètre" -" se trouve dans le coin supérieur droit (\"Dernières 30 minutes\" par " -"défaut). Modifie le délai pour qu'il corresponde à la période pendant " -"laquelle la simulation s'est déroulée." -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-flower-architecture.rst:161 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -"Q : Je vois s'afficher \"Serveur Grafana non détecté. Vérifie que le " -"serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" -" l'onglet Métriques dans Ray Dashboard." -#: ../../source/how-to-monitor-simulation.rst:220 -msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" -msgstr "" -"R : Grafana n'est probablement pas en cours d'exécution. Vérifie les " -"services en cours d'exécution" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "Modèle FED" -#: ../../source/how-to-monitor-simulation.rst:226 -msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." -msgstr "" -"Q : Je vois \"This site can't be reached\" quand je vais sur " -"``_." +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "Table des matières" -#: ../../source/how-to-monitor-simulation.rst:228 -msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." -msgstr "" -"R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " -"Prometheus." 
+#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[Table des matières](#table-of-contents)" -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "Ressources" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[Résumé](#résumé)" -#: ../../source/how-to-monitor-simulation.rst:234 -#, fuzzy -msgid "" -"Ray Dashboard: ``_" -msgstr "" -"Tableau de bord Ray : ``_" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[Motivation](#motivation)" -#: ../../source/how-to-monitor-simulation.rst:236 -#, fuzzy -msgid "Ray Metrics: ``_" -msgstr "" -"Ray Metrics : ``_" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[Buts](#buts)" -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[Non-objectifs](#non-objectifs)" -#: ../../source/how-to-run-flower-using-docker.rst:4 -msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__. Supported architectures include " -"``amd64`` and ``arm64v8``." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[Proposition](#proposition)" -#: ../../source/how-to-run-flower-using-docker.rst:8 -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[Inconvénients](#inconvénients)" -#: ../../source/how-to-run-flower-using-docker.rst:15 -msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[Alternatives envisagées](#alternatives-considered)" -#: ../../source/how-to-run-flower-using-docker.rst:21 -msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[Annexe](#appendix)" -#: ../../source/how-to-run-flower-using-docker.rst:27 -msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "Résumé" -#: ../../source/how-to-run-flower-using-docker.rst:32 +#: ../../source/fed/0000-20200102-fed-template.md:26 #, fuzzy -msgid "Flower SuperLink" -msgstr "flower-superlink" +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "[TODO - phrase 1 : résumé du problème]" -#: ../../source/how-to-run-flower-using-docker.rst:35 +#: ../../source/fed/0000-20200102-fed-template.md:28 #, fuzzy -msgid "Quickstart" -msgstr "Démarrage rapide de JAX" - -#: ../../source/how-to-run-flower-using-docker.rst:37 -msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:43 -msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:49 -msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." -msgstr "" +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "[TODO - phrase 2 : résumé de la solution]" -#: ../../source/how-to-run-flower-using-docker.rst:53 -msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "Motivation" -#: ../../source/how-to-run-flower-using-docker.rst:60 -#: ../../source/how-to-run-flower-using-docker.rst:259 -#: ../../source/how-to-run-flower-using-docker.rst:376 -msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +#, fuzzy +msgid "\\[TODO\\]" +msgstr "[TODO]" -#: ../../source/how-to-run-flower-using-docker.rst:65 -msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "Objectifs" -#: ../../source/how-to-run-flower-using-docker.rst:72 -msgid "Mounting a volume to store the state on the host system" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "Non-objectifs" -#: ../../source/how-to-run-flower-using-docker.rst:74 -msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a directory where you want to save the file" -" on your host system and a name for the database file. By default, the " -"SuperLink container runs with a non-root user called ``app`` with the " -"user ID ``49999``. It is recommended to create new directory and change " -"the user ID of the directory to ``49999`` to ensure the mounted directory" -" has the proper permissions. If you later want to delete the directory, " -"you can change the user ID back to the current user ID by running ``sudo " -"chown -R $USER:$(id -gn) state``." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "Proposition" -#: ../../source/how-to-run-flower-using-docker.rst:82 -msgid "" -"In the example below, we create a new directory, change the user ID and " -"tell Docker via the flag ``--volume`` to mount the local ``state`` " -"directory into the ``/app/state`` directory of the container. " -"Furthermore, we use the flag ``--database`` to specify the name of the " -"database file." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "Inconvénients" -#: ../../source/how-to-run-flower-using-docker.rst:95 -msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "Alternatives envisagées" -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 +#: ../../source/fed/0000-20200102-fed-template.md:52 #, fuzzy -msgid "Enabling SSL for secure connections" -msgstr "Collecte centralisée des données" +msgid "\\[Alternative 1\\]" +msgstr "[Alternative 1]" -#: ../../source/how-to-run-flower-using-docker.rst:102 -msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:56 +#, fuzzy +msgid "\\[Alternative 2\\]" +msgstr "[Alternative 2]" -#: ../../source/how-to-run-flower-using-docker.rst:106 -msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Doc sur l'amélioration des fleurs" -#: ../../source/how-to-run-flower-using-docker.rst:110 -msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container. This allows the " -"SuperLink to access the files within the container. The ``ro`` stands for" -" ``read-only``. Docker volumes default to ``read-write``; that option " -"tells Docker to make the volume ``read-only`` instead. Finally, we pass " -"the names of the certificates and key file to the SuperLink with the " -"``--ssl-ca-certfile``, ``--ssl-certfile`` and ``--ssl-keyfile`` flag." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" -#: ../../source/how-to-run-flower-using-docker.rst:128 -msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, " -"the mounted files and directories must have the proper permissions for " -"the user ID ``49999``. For example, to change the user ID of all files in" -" the ``certificates/`` directory, you can run ``sudo chown -R 49999:49999" -" certificates/*``." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[Métadonnées](#métadonnées)" -#: ../../source/how-to-run-flower-using-docker.rst:134 -#, fuzzy -msgid "Flower SuperNode" -msgstr "Serveur de Flower" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[Workflow](#workflow)" -#: ../../source/how-to-run-flower-using-docker.rst:136 -msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub Issues](#github-issues)" -#: ../../source/how-to-run-flower-using-docker.rst:141 -msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. 
A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[Google Docs](#google-docs)" -#: ../../source/how-to-run-flower-using-docker.rst:147 -msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" msgstr "" +"Une amélioration de la fleur est un processus de développement " +"standardisé pour" -#: ../../source/how-to-run-flower-using-docker.rst:155 -msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" msgstr "" +"fournir une structure commune pour proposer des changements plus " +"importants" -#: ../../source/how-to-run-flower-using-docker.rst:159 -#, fuzzy -msgid "Clone the Flower repository." -msgstr "**Fourche le dépôt de Flower**" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "s'assurer que la motivation du changement est claire" -#: ../../source/how-to-run-flower-using-docker.rst:173 -msgid "Creating a SuperNode Dockerfile" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" msgstr "" +"conserver les informations sur le projet dans un système de contrôle des " +"versions" -#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -msgid "Let's assume the following project layout:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" msgstr "" +"documenter la motivation des changements qui ont un impact sur " +"l'utilisateur" -#: ../../source/how-to-run-flower-using-docker.rst:184 -msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." 
+"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" +"s'assurer que les participants de la communauté peuvent mener à bien les " +"changements dans le cadre d'une ou plusieurs versions et que les parties " +"prenantes sont représentées de manière adéquate tout au long du processus" -#: ../../source/how-to-run-flower-using-docker.rst:200 -msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "Par conséquent, un document d'amélioration combine des aspects de" -#: ../../source/how-to-run-flower-using-docker.rst:203 -msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "une caractéristique, et un document de suivi des efforts" -#: ../../source/how-to-run-flower-using-docker.rst:217 -msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "un document sur les exigences du produit" -#: ../../source/how-to-run-flower-using-docker.rst:226 -#, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "Démarrer le serveur" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "un document de conception" -#: ../../source/how-to-run-flower-using-docker.rst:228 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." +"into one file, which is created incrementally in collaboration with the " +"community." msgstr "" +"en un seul fichier, qui est créé progressivement en collaboration avec la" +" communauté." -#: ../../source/how-to-run-flower-using-docker.rst:235 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:240 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "Démarrer le serveur" - -#: ../../source/how-to-run-flower-using-docker.rst:242 -msgid "Now that we have built the SuperNode image, we can finally run it." 
+"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" +"Pour les changements lointains ou les fonctionnalités proposées à Flower," +" une abstraction au-delà d'une simple question GitHub ou d'une demande de" +" tirage est nécessaire pour comprendre et communiquer les changements à " +"venir dans le projet." -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "Let's break down each part of this command:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +msgid "" +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" +"L'objectif de ce processus est de réduire la quantité de \"connaissances " +"tribales\" dans notre communauté. En déplaçant les décisions des fils de " +"discussion Slack, des appels vidéo et des conversations de couloir vers " +"un artefact bien suivi, ce processus vise à améliorer la communication et" +" la découvrabilité." -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 -msgid "``docker run``: This is the command to run a new Docker container." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +msgid "" +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" +"Si une amélioration doit être décrite par écrit ou verbalement à " +"quelqu'un d'autre que l'auteur ou le développeur, il faut envisager de " +"créer un document d'amélioration." -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." msgstr "" +"De même, tout effort technique (refactorisation, changement architectural" +" majeur) qui aura un impact sur une grande partie de la communauté de " +"développement doit également être communiqué à grande échelle. Le " +"processus d'amélioration est adapté à cela, même s'il n'aura aucun impact" +" sur l'utilisateur ou l'opérateur type." -#: ../../source/how-to-run-flower-using-docker.rst:254 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +msgid "" +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." 
msgstr "" +"Pour les petits changements et ajouts, passer par le processus " +"d'amélioration prendrait beaucoup de temps et serait inutile. Cela " +"inclut, par exemple, l'ajout de nouveaux algorithmes d'apprentissage " +"fédéré, car ceux-ci ne font qu'ajouter des fonctionnalités sans changer " +"le fonctionnement ou l'utilisation de Flower." -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -msgid "``--insecure``: This option enables insecure communication." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +msgid "" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." msgstr "" +"Les améliorations sont différentes des demandes de fonctionnalités, car " +"elles fournissent déjà un chemin tracé pour la mise en œuvre et sont " +"défendues par les membres de la communauté." -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of " -"the SuperLinks Fleet" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" +"Une amélioration est capturée dans un fichier Markdown qui suit un modèle" +" défini et un flux de travail pour examiner et stocker les documents " +"d'amélioration pour référence - le Doc d'amélioration." -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "Modèle de document d'amélioration" -#: ../../source/how-to-run-flower-using-docker.rst:269 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." +"Each enhancement doc is provided as a Markdown file having the following " +"structure" msgstr "" +"Chaque document d'amélioration est fourni sous la forme d'un fichier " +"Markdown ayant la structure suivante" -#: ../../source/how-to-run-flower-using-docker.rst:273 -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" msgstr "" +"Métadonnées (comme [décrit ci-dessous](#metadata) sous la forme d'un " +"préambule YAML)" -#: ../../source/how-to-run-flower-using-docker.rst:283 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "Titre (le même que dans les métadonnées)" -#: ../../source/how-to-run-flower-using-docker.rst:285 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. 
Use the ``--root-certificates`` flag when starting " -"the container." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "Table des matières (si nécessaire)" -#: ../../source/how-to-run-flower-using-docker.rst:297 -#, fuzzy -msgid "Flower ServerApp" -msgstr "Serveur de Flower" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "Notes/Contraintes/Cavats (facultatif)" -#: ../../source/how-to-run-flower-using-docker.rst:299 -msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "Détails de la conception (facultatif)" -#: ../../source/how-to-run-flower-using-docker.rst:301 -msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "Critères d'obtention du diplôme" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." +msgstr "À titre de référence, ce document suit la structure ci-dessus." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Métadonnées" -#: ../../source/how-to-run-flower-using-docker.rst:304 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." msgstr "" +"**numérofed** (Obligatoire) Le `numérofed` du dernier document " +"d'amélioration de la fleur + 1. Avec ce numéro, il devient facile de " +"faire référence à d'autres propositions." -#: ../../source/how-to-run-flower-using-docker.rst:309 -msgid "Creating a ServerApp Dockerfile" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair." -#: ../../source/how-to-run-flower-using-docker.rst:320 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." msgstr "" +"**status** (obligatoire) L'état actuel de la proposition. Voir " +"[workflow](#workflow) pour les états possibles." 
-#: ../../source/how-to-run-flower-using-docker.rst:324 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." msgstr "" +"**authors** (Obligatoire) Une liste des auteurs de la proposition. Il " +"s'agit simplement de leur identifiant GitHub." -#: ../../source/how-to-run-flower-using-docker.rst:335 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." msgstr "" +"**creation-date** (Obligatoire) Date à laquelle la proposition a été " +"soumise pour la première fois dans une PR." -#: ../../source/how-to-run-flower-using-docker.rst:343 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "Démarrer le serveur" - -#: ../../source/how-to-run-flower-using-docker.rst:345 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." msgstr "" +"**last-updated** (Facultatif) La date à laquelle la proposition a" +" été modifiée de manière significative pour la dernière fois." -#: ../../source/how-to-run-flower-using-docker.rst:352 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." msgstr "" +"**see-also** (Facultatif) Une liste d'autres propositions qui sont " +"pertinentes par rapport à celle-ci." -#: ../../source/how-to-run-flower-using-docker.rst:357 -#, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "Démarrer le serveur" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**replaces** (Facultatif) Une liste de propositions que celle-ci remplace." -#: ../../source/how-to-run-flower-using-docker.rst:359 -msgid "Now that we have built the ServerApp image, we can finally run it." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." msgstr "" +"**superseded-by** (Facultatif) Une liste de propositions que celle-ci " +"supplante." -#: ../../source/how-to-run-flower-using-docker.rst:371 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use."
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "Flux de travail" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of " -"the SuperLinks Driver" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" +"L'idée à l'origine de l'amélioration doit déjà avoir fait l'objet d'une " +"discussion ou d'une présentation au sein de la communauté. À ce titre, " +"elle a besoin d'un champion, généralement l'auteur, qui se charge de " +"l'amélioration. Cette personne doit également trouver des committers de " +"Flower prêts à examiner la proposition." -#: ../../source/how-to-run-flower-using-docker.rst:385 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." msgstr "" +"Les nouvelles améliorations sont enregistrées avec un nom de fichier de " +"la forme `NNNN-YYYYMMDD-enhancement-title.md`, `NNNN` étant le numéro du " +"document d'amélioration de Flower, dans `enhancements`. Toutes les " +"améliorations commencent à l'état `provisional` dans le cadre d'une " +"demande d'extraction. Les discussions sont effectuées dans le cadre de " +"l'examen de la demande d'extraction." -#: ../../source/how-to-run-flower-using-docker.rst:389 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." msgstr "" +"Une fois qu'une amélioration a été examinée et approuvée, son statut " +"passe à `implementable`. L'implémentation réelle est alors réalisée dans " +"des demandes d'extraction séparées. Ces demandes d'extraction doivent " +"mentionner l'amélioration concernée dans leur description. Une fois " +"l'implémentation réalisée, le statut de la proposition passe à " +"`implemented`." -#: ../../source/how-to-run-flower-using-docker.rst:399 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" msgstr "" +"Sous certaines conditions, d'autres états sont possibles. 
Une " +"amélioration a les états suivants :" -#: ../../source/how-to-run-flower-using-docker.rst:401 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--root-certificates`` flags when starting" -" the container." +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." msgstr "" +"`provisional` : L'amélioration a été proposée et est en cours de " +"définition. C'est l'état de départ pendant que la proposition est étoffée" +" et activement définie et discutée." -#: ../../source/how-to-run-flower-using-docker.rst:412 -#, fuzzy -msgid "Advanced Docker options" -msgstr "Options d'installation avancées" - -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`implementable` : L'amélioration a été examinée et approuvée." -#: ../../source/how-to-run-flower-using-docker.rst:417 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"Flower Docker images, by default, run with a non-root user " -"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is not" -" recommended unless it is necessary for specific tasks during the build " -"process. Always make sure to run the container as a non-root user in " -"production to maintain security best practices." +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." msgstr "" +"`implemented` : L'amélioration a été mise en œuvre et n'est plus " +"activement modifiée." -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." msgstr "" +"`deferred` : L'amélioration est proposée mais n'est pas activement " +"travaillée." -#: ../../source/how-to-run-flower-using-docker.rst:424 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." msgstr "" +"`rejected` : Les auteurs et les réviseurs ont décidé que cette amélioration" +" n'ira pas de l'avant." -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "`replaced` : L'amélioration a été remplacée par une nouvelle amélioration."
-#: ../../source/how-to-run-flower-using-docker.rst:434 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the " -"``USER root`` directive within your Dockerfile." +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" +"L'ajout d'un processus supplémentaire à ceux déjà fournis par GitHub " +"(Issues et Pull Requests) ajoute plus de complexité et peut constituer un" +" obstacle pour les éventuels nouveaux contributeurs." -#: ../../source/how-to-run-flower-using-docker.rst:454 -msgid "Using a different Flower version" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:456 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" +"Élargir le modèle de proposition au-delà de la description d'une seule " +"phrase actuellement requise dans le modèle d'issue pour les " +"fonctionnalités peut constituer une lourde charge pour les personnes " +"dont l'anglais n'est pas la langue maternelle." -#: ../../source/how-to-run-flower-using-docker.rst:460 -msgid "Pinning a Docker image to a specific version" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "GitHub Issues" -#: ../../source/how-to-run-flower-using-docker.rst:462 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" +"Il est possible d'utiliser GitHub Issues pour ce type d'améliorations. On" +" pourrait utiliser, par exemple, des balises pour les différencier et les" +" filtrer par rapport aux autres issues. Le principal problème concerne" +" la discussion et la révision d'une amélioration : les GitHub Issues " +"n'ont qu'un seul fil de discussion pour les commentaires. Les " +"améliorations ont généralement plusieurs fils de discussion en même temps" +" pour différentes parties de la documentation. La gestion de ces " +"multiples discussions peut être déroutante lorsque l'on utilise GitHub " +"Issues."
+ +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "Google Docs" -#: ../../source/how-to-run-flower-using-docker.rst:467 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." msgstr "" +"Les Google Docs permettent plusieurs fils de discussion. Mais " +"comme les Google Docs sont hébergés en dehors du projet, il faut veiller " +"à ce que la communauté puisse les découvrir. Une liste de liens vers " +"toutes les propositions doit être gérée et mise à la disposition de la " +"communauté. Par rapport à l'envoi de propositions dans le cadre du " +"référentiel de Flower, le risque de liens manquants est beaucoup plus " +"élevé." -#: ../../source/how-to-run-flower-using-docker.rst:474 -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Doc d'amélioration de Flower" -#: ../../source/how-to-run-flower-using-docker.rst:483 +#: ../../source/how-to-aggregate-evaluation-results.rst:2 #, fuzzy -msgid "Setting environment variables" -msgstr "Mise en place de l'environnement de codage" +msgid "Aggregate evaluation results" +msgstr "Résultats globaux de l'évaluation." -#: ../../source/how-to-run-flower-using-docker.rst:485 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." msgstr "" -#: ../../source/how-to-run-simulations.rst:2 -#, fuzzy -msgid "Run simulations" -msgstr "Simulation de moniteur" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 msgid "Aggregate Custom Evaluation Results" msgstr "Agréger les résultats de l'évaluation personnalisée" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"The same :code:`Strategy`-customization approach can be used to aggregate" +" custom evaluation results coming from individual clients. Clients can " +"
Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" +"La même approche de personnalisation :code:`Stratégie` peut être utilisée" +" pour agréger les résultats d'évaluation personnalisés provenant de " +"clients individuels. Les clients peuvent renvoyer des mesures " +"personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/how-to-aggregate-evaluation-results.rst:36 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" msgstr "" +"Le serveur peut alors utiliser une stratégie personnalisée pour agréger " +"les mesures fournies dans ces dictionnaires :" -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" msgstr "" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -#: ../../source/how-to-run-simulations.rst:14 -msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +#: ../../source/how-to-authenticate-supernodes.rst:7 +msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/how-to-authenticate-supernodes.rst:8 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." 
+"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" msgstr "" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" +#: ../../source/how-to-authenticate-supernodes.rst:9 +msgid "" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" msgstr "" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +#: ../../source/how-to-authenticate-supernodes.rst:10 +msgid "SuperLink verifies the token" msgstr "" -#: ../../source/how-to-run-simulations.rst:44 +#: ../../source/how-to-authenticate-supernodes.rst:12 #, fuzzy -msgid "VirtualClientEngine resources" -msgstr "Moteur de client virtuel" - -#: ../../source/how-to-run-simulations.rst:45 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/how-to-authenticate-supernodes.rst:15 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"This guide covers a preview feature that might change in future versions " +"of Flower." msgstr "" -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/how-to-authenticate-supernodes.rst:18 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." msgstr "" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." 
+#: ../../source/how-to-authenticate-supernodes.rst:21 +msgid "Enable node authentication in :code:`SuperLink`" msgstr "" -#: ../../source/how-to-run-simulations.rst:68 +#: ../../source/how-to-authenticate-supernodes.rst:23 msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower " +":code:`SuperLink`. Use the following terminal command to start a Flower " +":code:`SuperNode` that has both secure connections and node " +"authentication enabled:" msgstr "" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" +#: ../../source/how-to-authenticate-supernodes.rst:38 +msgid "Let's break down the authentication flags:" msgstr "" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/how-to-authenticate-supernodes.rst:40 msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " +"file storing all known node public keys. You need to store all known node" +" public keys that are allowed to participate in a federation in one CSV " +"file (:code:`.csv`)." msgstr "" -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/how-to-authenticate-supernodes.rst:42 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -#: ../../source/how-to-run-simulations.rst:94 -#, fuzzy -msgid "Simulation examples" -msgstr "Exemples de PyTorch" - -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/how-to-authenticate-supernodes.rst:44 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +"The second and third flags :code:`--auth-superlink-private-key` and :code" +":`--auth-superlink-public-key` expect paths to the server's private and " +"public keys. For development purposes, you can generate a private and " +"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." msgstr "" -#: ../../source/how-to-run-simulations.rst:98 -#, fuzzy +#: ../../source/how-to-authenticate-supernodes.rst:47 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." 
+"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" -#: ../../source/how-to-run-simulations.rst:99 -msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +#: ../../source/how-to-authenticate-supernodes.rst:53 +msgid "Enable node authentication in :code:`SuperNode`" msgstr "" -#: ../../source/how-to-run-simulations.rst:104 -#, fuzzy -msgid "Multi-node Flower simulations" -msgstr "Simulation de moniteur" - -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/how-to-authenticate-supernodes.rst:55 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" +"Similar to the long-running Flower server (:code:`SuperLink`), you can " +"easily enable node authentication in the long-running Flower client " +"(:code:`SuperNode`). Use the following terminal command to start an " +"authenticated :code:`SuperNode`:" msgstr "" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." +#: ../../source/how-to-authenticate-supernodes.rst:66 +msgid "" +"The :code:`--auth-supernode-private-key` flag expects a path to the " +"node's private key file and the :code:`--auth-supernode-public-key` flag " +"expects a path to the node's public key file. For development purposes, " +"you can generate a private and public key pair using :code:`ssh-keygen -t" +" ecdsa -b 384`." msgstr "" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#: ../../source/how-to-authenticate-supernodes.rst:70 +msgid "Security notice" msgstr "" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/how-to-authenticate-supernodes.rst:72 msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -#: ../../source/how-to-run-simulations.rst:111 -msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." -msgstr "" +#: ../../source/how-to-authenticate-supernodes.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:68 +#: ../../source/how-to-use-built-in-mods.rst:85 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "Conclusion" -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/how-to-authenticate-supernodes.rst:79 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." 
+"You should now have learned how to start a long-running Flower server " +"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " +"authentication enabled. You should also know the significance of the " +"private key and store it safely to minimize security risks." msgstr "" -#: ../../source/how-to-run-simulations.rst:113 -msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" -msgstr "" +#: ../../source/how-to-configure-clients.rst:2 +#, fuzzy +msgid "Configure clients" +msgstr "Configurer les clients" -#: ../../source/how-to-run-simulations.rst:115 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." msgstr "" +"En plus des paramètres du modèle, Flower peut envoyer des valeurs de " +"configuration aux clients. Les valeurs de configuration peuvent être " +"utilisées à diverses fins. Elles constituent, par exemple, un moyen " +"populaire de contrôler les hyperparamètres côté client à partir du " +"serveur." -#: ../../source/how-to-run-simulations.rst:117 -msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." -msgstr "" +#: ../../source/how-to-configure-clients.rst:7 +msgid "Configuration values" +msgstr "Valeurs de configuration" -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" +#: ../../source/how-to-configure-clients.rst:9 +msgid "" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" msgstr "" +"Les valeurs de configuration sont représentées sous forme de dictionnaire" +" avec des clés `str`` et des valeurs de type `bool`, `bytes`, `double` " +"(float de précision 64 bits), `int`, ou `str` (ou des types équivalents " +"dans d'autres langages). Voici un exemple de dictionnaire de " +"configuration en Python :" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/how-to-configure-clients.rst:20 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" +"Flower sérialise ces dictionnaires de configuration (ou *config dict* en " +"abrégé) dans leur représentation ProtoBuf, les transporte vers le client " +"à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/how-to-configure-clients.rst:24 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." 
+"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." msgstr "" +"Actuellement, il n'est pas possible d'envoyer directement des types de " +"collections (par exemple, ``Set``, ``List``, ``Map``) en tant que valeurs" +" dans les dictionnaires de configuration. Il existe plusieurs solutions " +"pour envoyer des collections en tant que valeurs en les convertissant en " +"l'un des types de valeurs pris en charge (et en les reconvertissant du " +"côté client)." -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/how-to-configure-clients.rst:26 msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." msgstr "" +"On peut, par exemple, convertir une liste de nombres à virgule flottante " +"en une chaîne JSON, puis envoyer la chaîne JSON à l'aide du dictionnaire " +"de configuration, et enfin reconvertir la chaîne JSON en une liste de " +"nombres à virgule flottante sur le client." -#: ../../source/how-to-run-simulations.rst:132 -#, fuzzy -msgid "Considerations for simulations" -msgstr "Simulation de moniteur" +#: ../../source/how-to-configure-clients.rst:30 +msgid "Configuration through built-in strategies" +msgstr "Configuration par le biais de stratégies intégrées" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/how-to-configure-clients.rst:32 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" +"called configuration functions. A configuration function is a function " +"that the built-in strategy calls to get the configuration dictionary for " +"the current round. It then forwards the configuration dictionary to all " +"the clients selected during that round." msgstr "" +"La façon la plus simple d'envoyer des valeurs de configuration aux " +"clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " +"stratégies intégrées prennent en charge ce que l'on appelle les fonctions" +" de configuration. Une fonction de configuration est une fonction que la " +"stratégie intégrée appelle pour obtenir le dictionnaire de configuration " +"pour le tour en cours. Elle transmet ensuite le dictionnaire de " +"configuration à tous les clients sélectionnés au cours de ce tour." 
-#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/how-to-configure-clients.rst:34 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" +"Commençons par un exemple simple. Imaginons que nous voulions envoyer (a)" +" la taille du lot que le client doit utiliser, (b) le cycle global actuel" +" de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " +"client. Notre fonction de configuration pourrait ressembler à ceci :" -#: ../../source/how-to-run-simulations.rst:141 -#, fuzzy -msgid "GPU resources" -msgstr "Ressources" - -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/how-to-configure-clients.rst:47 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +":code:`on_fit_config_fn`:" msgstr "" +"Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " +"la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" +" :code:`on_fit_config_fn` :" -#: ../../source/how-to-run-simulations.rst:146 -msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." -msgstr "" +#: ../../source/how-to-configure-clients.rst:56 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-configure-clients.rst:67 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" +"Il existe également une fonction `on_evaluate_config_fn` pour configurer " +"l'évaluation, qui fonctionne de la même manière. Ce sont des fonctions " +"séparées car on peut vouloir envoyer différentes valeurs de configuration" +" à `evaluate` (par exemple, pour utiliser une taille de lot différente)." 
-#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/how-to-configure-clients.rst:69 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" +"Les stratégies intégrées appellent cette fonction à chaque tour " +"(c'est-à-dire à chaque fois que `Strategy.configure_fit` ou " +"`Strategy.configure_evaluate` s'exécute). Appeler `on_evaluate_config_fn`" +" à chaque tour nous permet de varier/changer le dict de config au cours " +"de tours consécutifs. Si nous voulions mettre en place un calendrier " +"d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " +"locales au cours des derniers tours, nous pourrions faire ce qui suit :" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/how-to-configure-clients.rst:82 +msgid "The :code:`FedAvg` strategy will call this function *every round*." +msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." + +#: ../../source/how-to-configure-clients.rst:85 +msgid "Configuring individual clients" +msgstr "Configuration des clients individuels" + +#: ../../source/how-to-configure-clients.rst:87 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"In some cases, it is necessary to send different configuration values to " +"different clients." msgstr "" +"Dans certains cas, il est nécessaire d'envoyer des valeurs de " +"configuration différentes à des clients différents." -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/how-to-configure-clients.rst:89 +#, fuzzy msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" +"Ceci peut être réalisé en personnalisant une stratégie existante ou en " +"`mettant en œuvre une stratégie personnalisée à partir de zéro " +"`_. 
" +"Voici un exemple absurde qui personnalise :code:`FedAvg` en ajoutant une " +"paire clé/valeur de configuration personnalisée ``\"hello\" : \"world\"``" +" au config dict d'un *seul client* (uniquement le premier client de la " +"liste, les autres clients de cette série ne recevant pas cette valeur de " +"configuration \"spéciale\") :" -#: ../../source/how-to-run-simulations.rst:156 +#: ../../source/how-to-configure-logging.rst:2 #, fuzzy -msgid "TensorFlow with GPUs" -msgstr "Exemples de TensorFlow" +msgid "Configure logging" +msgstr "Configurer les clients" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" msgstr "" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-configure-logging.rst:13 msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"containing relevant information including: log message level (e.g. " +":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " +"took place from, as well as the log message itself. In this way, the " +"logger would typically display information on your terminal as follows:" msgstr "" -#: ../../source/how-to-run-simulations.rst:179 -#, fuzzy -msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +#: ../../source/how-to-configure-logging.rst:34 +msgid "Saving log to file" msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" +#: ../../source/how-to-configure-logging.rst:36 +msgid "" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do :code:`fl.server.start_server`) and when " +"using the :code:`VirtualClientEngine` (i.e. when you do " +":code:`fl.simulation.start_simulation`). In some situations you might " +"want to save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-configure-logging.rst:53 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. 
Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"With the above, Flower will record the log you see on your terminal to " +":code:`log.txt`. This file will be created in the same directory as were " +"you are running the code from. If we inspect we see the log above is also" +" recorded but prefixing with :code:`identifier` each line:" msgstr "" +"Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " +"terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " +"depuis lequel le code est exécuté. Si nous inspectons nous voyons que le " +"log ci-dessous est également enregistré mais préfixé avec " +":code:`identifier` sur chaque ligne :" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-configure-logging.rst:74 +msgid "Log your own messages" +msgstr "Loggez vos propres messages" + +#: ../../source/how-to-configure-logging.rst:76 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -#, fuzzy -msgid "Save and load model checkpoints" -msgstr "Sauvegarde et chargement des points de contrôle PyTorch" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/how-to-configure-logging.rst:102 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -#, fuzzy -msgid "Model checkpointing" -msgstr "Point de contrôle du modèle" +#: ../../source/how-to-configure-logging.rst:128 +msgid "Log to a remote service" +msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/how-to-configure-logging.rst:130 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). 
It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"The :code:`fl.common.logger.configure` function, also allows specifying a" +" host to which logs can be pushed (via :code:`POST`) through a native " +"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" +" feature in :code:`gRPC`-based Federated Learning workloads where " +"otherwise gathering logs from all entities (i.e. the server and the " +"clients) might be cumbersome. Note that in Flower simulation, the server " +"automatically displays all logs. You can still specify a " +":code:`HTTPHandler` should you wish to backup or analyze the logs " +"somewhere else." msgstr "" -"Les mises à jour du modèle peuvent être conservées côté serveur en " -"personnalisant les méthodes :code:`Strategy`. L'implémentation de " -"stratégies personnalisées est toujours possible, mais dans de nombreux " -"cas, il peut être plus pratique de simplement personnaliser une stratégie" -" existante. L'exemple de code suivant définit une nouvelle " -":code:`SaveModelStrategy` qui personnalise la stratégie intégrée " -":code:`FedAvg` existante. En particulier, il personnalise " -":code:`aggregate_fit` en appelant :code:`aggregate_fit` dans la classe de" -" base (:code:`FedAvg`). Il continue ensuite à sauvegarder les poids " -"retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " -"(c'est-à-dire le serveur) :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +#: ../../source/how-to-enable-ssl-connections.rst:2 #, fuzzy -msgid "Save and load PyTorch checkpoints" -msgstr "Sauvegarde et chargement des points de contrôle PyTorch" +msgid "Enable SSL connections" +msgstr "Collecte centralisée des données" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-enable-ssl-connections.rst:4 #, fuzzy msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." +"This guide describes how to a SSL-enabled secure Flower server " +"(:code:`SuperLink`) can be started and how a Flower client " +"(:code:`SuperNode`) can establish a secure connections to it." msgstr "" -"Comme dans l'exemple précédent, mais avec quelques étapes " -"supplémentaires, nous allons montrer comment stocker un point de contrôle" -" PyTorch en utilisant la fonction ``torch.save``. Tout d'abord, " -"``aggregate_fit`` renvoie un objet ``Parameters`` qui doit être " -"transformé en une liste de `ndarray`` NumPy, puis ceux-ci sont " -"transformés en ``state_dict`` PyTorch en suivant la structure de la " -"classe ``OrderedDict``." +"Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " +"comment un client Flower peut établir une connexion sécurisée avec lui." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-enable-ssl-connections.rst:7 +#, fuzzy msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" +"A complete code example demonstrating a secure connection can be found " +"`here `_." 
msgstr "" -"Pour charger ta progression, il te suffit d'ajouter les lignes suivantes " -"à ton code. Note que cela va itérer sur tous les points de contrôle " -"sauvegardés et charger le plus récent :" +"Un exemple de code complet démontrant une connexion sécurisée peut être " +"trouvé ici `_." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-enable-ssl-connections.rst:10 +#, fuzzy msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +"The code example comes with a :code:`README.md` file which explains how " +"to start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." msgstr "" +"L'exemple de code est accompagné d'un fichier README.md qui t'expliquera " +"comment le démarrer. Bien qu'il soit déjà activé par SSL, il peut être " +"moins descriptif sur la façon de procéder. Tiens-toi en à ce guide pour " +"une introduction plus approfondie sur le sujet." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "Passe à Flower 1.0" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "Certificats" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-enable-ssl-connections.rst:18 +#, fuzzy msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in :code:`examples/advanced-" +"tensorflow/certificates/generate.sh` with the following command sequence:" msgstr "" -"Flower 1.0 est arrivé. En plus de nouvelles fonctionnalités, Flower 1.0 " -"fournit une base stable pour la croissance future. Par rapport à Flower " -"0.19 (et aux autres versions de la série 0.x), il y a quelques " -"changements qui nécessitent de modifier le code des projets de la série " -"0.x existants." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "Installer la mise à jour" +"L'utilisation de connexions compatibles avec le protocole SSL nécessite " +"que des certificats soient transmis au serveur et au client. Pour les " +"besoins de ce guide, nous allons générer des certificats auto-signés. " +"Comme cela peut devenir assez complexe, nous allons te demander " +"d'exécuter le script dans :code:`examples/advanced-" +"tensorflow/certificates/generate.sh`" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-enable-ssl-connections.rst:29 +#, fuzzy msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" +"This will generate the certificates in :code:`examples/advanced-" +"tensorflow/.cache/certificates`." msgstr "" -"Voici comment mettre à jour une installation existante vers Flower 1.0 en" -" utilisant soit pip soit Poetry :" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." 
-msgstr "pip : ajoute ``-U`` lors de l'installation." +"Cela générera les certificats dans :code:`examples/advanced-" +"tensorflow/.cache/certificates`." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-enable-ssl-connections.rst:31 +#, fuzzy msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" -"``python -m pip install -U flwr`` (lors de l'utilisation de " -"``start_server`` et ``start_client``)" +"L'approche de la génération des certificats SSL dans cet exemple peut " +"servir d'inspiration et de point de départ, mais ne doit pas être " +"considérée comme complète pour les environnements de production." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 -msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" -msgstr "" -"``python -m pip install -U 'flwr[simulation]'`` (lors de l'utilisation de " -"``start_simulation``)" +#: ../../source/how-to-enable-ssl-connections.rst:39 +#, fuzzy +msgid "Server (SuperLink)" +msgstr "flower-superlink" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-enable-ssl-connections.rst:41 +#, fuzzy msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." -msgstr "" -"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " -"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " -"poetry.lock`` avant d'exécuter ``poetry install``)." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" msgstr "" -"``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " -"``start_client``)" +"Nous allons maintenant montrer comment écrire un client qui utilise les " +"scripts générés précédemment :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-enable-ssl-connections.rst:50 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" -msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " -"l'utilisation de ``start_simulation``)" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "Changements nécessaires" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." msgstr "" -"Les changements de rupture suivants nécessitent des mises à jour " -"manuelles." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "Généralités" +#: ../../source/how-to-enable-ssl-connections.rst:54 +#, fuzzy +msgid "Client (SuperNode)" +msgstr "Codes d'état du client." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-enable-ssl-connections.rst:56 +#, fuzzy msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" msgstr "" -"Passe tous les arguments comme des arguments de mots-clés (et non comme " -"des arguments de position). Voici un exemple :" +"Nous allons maintenant montrer comment écrire un client qui utilise les " +"scripts générés précédemment :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-enable-ssl-connections.rst:64 +#, fuzzy msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +"When setting :code:`root_certificates`, the client expects a file path to" +" PEM-encoded root certificates." msgstr "" -"Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," -" FlowerClient())``" +"En définissant :code:`root_certificates`, le client s'attend à recevoir " +"les certificats racine codés en PEM sous forme de chaîne d'octets. Nous " +"utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " +"certificats sous forme de chaînes d'octets." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-enable-ssl-connections.rst:70 +#, fuzzy msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." msgstr "" -"Fleur 1.0 (arguments de mots-clés) : " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"Tu devrais maintenant avoir appris à générer des certificats auto-signés " +"à l'aide du script donné, à démarrer un serveur compatible SSL et à " +"demander à un client d'établir une connexion sécurisée avec lui." 
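A matching client-side sketch for the secure connection described above, again using the legacy :code:`start_client` entry point instead of the :code:`SuperNode` CLI. The placeholder :code:`FlowerClient` and the certificate path are assumptions; the SSL-relevant part is reading the PEM-encoded CA certificate and passing it as :code:`root_certificates`.

.. code-block:: python

    from pathlib import Path

    import numpy as np
    import flwr as fl
    from flwr.client import NumPyClient


    class FlowerClient(NumPyClient):
        """Placeholder client so the sketch is self-contained; replace with your own."""

        def get_parameters(self, config):
            return [np.zeros(3)]

        def fit(self, parameters, config):
            return parameters, 1, {}

        def evaluate(self, parameters, config):
            return 0.0, 1, {}


    fl.client.start_client(
        server_address="127.0.0.1:8080",
        client=FlowerClient().to_client(),
        # CA certificate produced by generate.sh (path is an assumption)
        root_certificates=Path(
            "examples/advanced-tensorflow/.cache/certificates/ca.crt"
        ).read_bytes(),
    )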
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "Client" +#: ../../source/how-to-enable-ssl-connections.rst:75 +#, fuzzy +msgid "Additional resources" +msgstr "Ressources supplémentaires" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-enable-ssl-connections.rst:77 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" msgstr "" -"Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` " -"en ``def get_parameters(self, config):``" +"Ces sources supplémentaires peuvent être pertinentes si tu souhaites " +"approfondir le sujet des certificats :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 -msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" -msgstr "" -"Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en " -"``def get_parameters(self, ins : GetParametersIns):``" +#: ../../source/how-to-enable-ssl-connections.rst:79 +msgid "`Let's Encrypt `_" +msgstr "`Let's Encrypt `_" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "Stratégies / ``démarrer_serveur`` / ``démarrer_simulation``" +#: ../../source/how-to-enable-ssl-connections.rst:80 +msgid "`certbot `_" +msgstr "`certbot `_" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-implement-strategies.rst:2 +#, fuzzy +msgid "Implement strategies" +msgstr "Mettre en place des stratégies" + +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" -"Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et" -" ``start_simulation``. Voici un exemple :" +"L'abstraction de la stratégie permet de mettre en œuvre des stratégies " +"entièrement personnalisées. Une stratégie est essentiellement " +"l'algorithme d'apprentissage fédéré qui s'exécute sur le serveur. Les " +"stratégies décident comment échantillonner les clients, comment " +"configurer les clients pour la formation, comment agréger les mises à " +"jour et comment évaluer les modèles. Flower fournit quelques stratégies " +"intégrées qui sont basées sur la même API que celle décrite ci-dessous." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-implement-strategies.rst:11 +msgid "The :code:`Strategy` abstraction" +msgstr "L'abstraction :code:`Stratégie`" + +#: ../../source/how-to-implement-strategies.rst:13 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"All strategy implementation are derived from the abstract base class " +":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"third party implementations. 
This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" -"Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " -"\"round_timeout\" : 600.0}, ...)``" +"Toutes les implémentations de stratégies sont dérivées de la classe de " +"base abstraite :code:`flwr.server.strategy.Strategy`, qu'il s'agisse " +"d'implémentations intégrées ou d'implémentations tierces. Cela signifie " +"que les implémentations de stratégies personnalisées ont exactement les " +"mêmes capacités à leur disposition que les implémentations intégrées." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"L'abstraction de la stratégie définit quelques méthodes abstraites qui " +"doivent être mises en œuvre :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-implement-strategies.rst:75 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +"Creating a new strategy means implementing a new :code:`class` (derived " +"from the abstract base class :code:`Strategy`) that implements for the " +"previously shown abstract methods:" msgstr "" -"Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " -"``config=ServerConfig(...)`` (voir point précédent)" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 -msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." -msgstr "" -"Supprime le paramètre ``force_final_distributed_eval`` des appels à " -"``start_server``. L'évaluation distribuée sur tous les clients peut être " -"activée en configurant la stratégie pour échantillonner tous les clients " -"pour l'évaluation après le dernier tour de formation." +"La création d'une nouvelle stratégie implique la mise en œuvre d'une " +"nouvelle :code:`classe` (dérivée de la classe de base abstraite " +":code:`Stratégie`) qui met en œuvre les méthodes abstraites présentées " +"précédemment :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :" +#: ../../source/how-to-implement-strategies.rst:100 +msgid "The Flower server calls these methods in the following order:" +msgstr "Le serveur Flower appelle ces méthodes dans l'ordre suivant :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/how-to-implement-strategies.rst:177 +msgid "The following sections describe each of those methods in more detail." +msgstr "Les sections suivantes décrivent chacune de ces méthodes plus en détail." 
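As a rough sketch of that structure (not a usable algorithm), the skeleton below subclasses :code:`flwr.server.strategy.Strategy` and stubs out every abstract method with a trivial placeholder; a real strategy would add its own sampling, aggregation, and evaluation logic.

.. code-block:: python

    from typing import Dict, List, Optional, Tuple, Union

    import flwr as fl
    from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar
    from flwr.server.client_manager import ClientManager
    from flwr.server.client_proxy import ClientProxy


    class SketchStrategy(fl.server.strategy.Strategy):
        """Skeleton only: placeholder bodies for each abstract method."""

        def initialize_parameters(
            self, client_manager: ClientManager
        ) -> Optional[Parameters]:
            return None  # None makes the server ask a random client for parameters

        def configure_fit(
            self, server_round: int, parameters: Parameters, client_manager: ClientManager
        ) -> List[Tuple[ClientProxy, FitIns]]:
            clients = client_manager.sample(num_clients=2)
            return [(client, FitIns(parameters, {})) for client in clients]

        def aggregate_fit(
            self,
            server_round: int,
            results: List[Tuple[ClientProxy, FitRes]],
            failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
        ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
            return None, {}  # a real strategy would aggregate `results` here

        def configure_evaluate(
            self, server_round: int, parameters: Parameters, client_manager: ClientManager
        ) -> List[Tuple[ClientProxy, EvaluateIns]]:
            return []  # no federated evaluation in this sketch

        def aggregate_evaluate(
            self,
            server_round: int,
            results: List[Tuple[ClientProxy, EvaluateRes]],
            failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]],
        ) -> Tuple[Optional[float], Dict[str, Scalar]]:
            return None, {}

        def evaluate(
            self, server_round: int, parameters: Parameters
        ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
            return None  # no server-side evaluation in this sketch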
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``Poids_à_paramètres`` --> ``Réseaux_à_paramètres``" +#: ../../source/how-to-implement-strategies.rst:180 +msgid "The :code:`initialize_parameters` method" +msgstr "La méthode :code:`initialize_parameters` (initialisation des paramètres)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-implement-strategies.rst:182 msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +":code:`initialize_parameters` is called only once, at the very beginning " +"of an execution. It is responsible for providing the initial global model" +" parameters in a serialized form (i.e., as a :code:`Parameters` object)." msgstr "" -"Initialisation de la stratégie : si la stratégie repose sur les valeurs " -"par défaut de ``fraction_fit`` et ``fraction_evaluate``, fixer " -"manuellement ``fraction_fit`` et ``fraction_evaluate`` à `0.1``. Les " -"projets qui ne créent pas manuellement une stratégie (en appelant " -"``start_server` ou ``start_simulation`` sans passer une instance de " -"stratégie) doivent maintenant initialiser manuellement FedAvg avec " -"``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients` --> ``min_evaluate_clients``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +":code:`initialize_parameters` n'est appelé qu'une seule fois, au tout " +"début d'une exécution. Il est chargé de fournir les paramètres initiaux " +"du modèle global sous une forme sérialisée (c'est-à-dire sous la forme " +"d'un objet :code:`Parameters`)." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-implement-strategies.rst:184 msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +":code:`FedAvg`:" msgstr "" -"Renommez ``rnd`` en ``server_round``. Cela a un impact sur plusieurs " -"méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``," -" ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``." 
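A minimal sketch of how initial parameters can be handed to :code:`FedAvg`: convert a list of NumPy arrays with :code:`ndarrays_to_parameters` and pass the result via :code:`initial_parameters`. The toy weights below are an assumption; in practice they would be extracted from your model.

.. code-block:: python

    import numpy as np
    import flwr as fl
    from flwr.common import ndarrays_to_parameters

    # Toy initial weights; in a real project these would come from the model,
    # e.g. [val.cpu().numpy() for _, val in net.state_dict().items()] in PyTorch.
    initial_ndarrays = [np.zeros((10, 5)), np.zeros((10,))]

    strategy = fl.server.strategy.FedAvg(
        initial_parameters=ndarrays_to_parameters(initial_ndarrays),
    )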
- -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "Ajoute ``server_round`` et ``config`` à `evaluate_fn`` :" +"Les stratégies intégrées renvoient les paramètres initiaux fournis par " +"l'utilisateur. L'exemple suivant montre comment les paramètres initiaux " +"peuvent être transmis à :code:`FedAvg` :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-implement-strategies.rst:209 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The Flower server will call :code:`initialize_parameters`, which either " +"returns the parameters that were passed to :code:`initial_parameters`, or" +" :code:`None`. If no parameters are returned from " +":code:`initialize_parameters` (i.e., :code:`None`), the server will " +"randomly select one client and ask it to provide its parameters. This is " +"a convenience feature and not recommended in practice, but it can be " +"useful for prototyping. In practice, it is recommended to always use " +"server-side parameter initialization." msgstr "" -"Flower 0.19 : ``def evaluate(parameters : NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]]:``" +"Le serveur Flower appelle :code:`initialize_parameters`, qui renvoie les " +"paramètres passés à :code:`initial_parameters`, ou :code:`None`. Si aucun" +" paramètre n'est renvoyé par :code:`initialize_parameters` (c'est-à-dire " +":code:`None`), le serveur sélectionne au hasard un client et lui demande " +"de fournir ses paramètres. Il s'agit d'une fonction de commodité qui " +"n'est pas recommandée dans la pratique, mais qui peut être utile pour le " +"prototypage. Dans la pratique, il est recommandé de toujours utiliser " +"l'initialisation des paramètres du côté du serveur." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-implement-strategies.rst:213 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" -"Flower 1.0 : ``def evaluate(server_round : int, parameters : NDArrays, " -"config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"L'initialisation des paramètres côté serveur est un mécanisme puissant. " +"Elle peut être utilisée, par exemple, pour reprendre l'entraînement à " +"partir d'un point de contrôle précédemment sauvegardé. C'est également la" +" capacité fondamentale nécessaire pour mettre en œuvre des approches " +"hybrides, par exemple, pour affiner un modèle pré-entraîné à l'aide de " +"l'apprentissage fédéré." 
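One concrete, hypothetical illustration of the checkpoint use case: if aggregated weights were previously written to disk with :code:`np.savez`, they can be reloaded and used as the initial global parameters of the next run. The file name and format below are assumptions, not part of the Flower API.

.. code-block:: python

    from pathlib import Path

    import numpy as np
    import flwr as fl
    from flwr.common import ndarrays_to_parameters

    # Hypothetical checkpoint written earlier with np.savez(path, *ndarrays)
    checkpoint = Path("checkpoints/round-50-weights.npz")

    initial_parameters = None
    if checkpoint.exists():
        data = np.load(checkpoint)
        initial_parameters = ndarrays_to_parameters([data[name] for name in data.files])

    # With initial_parameters=None, Flower falls back to asking a client instead
    strategy = fl.server.strategy.FedAvg(initial_parameters=initial_parameters)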
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "Stratégies personnalisées" +#: ../../source/how-to-implement-strategies.rst:216 +msgid "The :code:`configure_fit` method" +msgstr "La méthode :code:`configure_fit`" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +":code:`configure_fit` is responsible for configuring the upcoming round " +"of training. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of :code:`configure_fit` makes this clear:" msgstr "" -"Le type du paramètre ``failures`` a changé de ``List[BaseException]`` à " -"``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (dans " -"``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " -"BaseException]]`` (dans ``aggregate_evaluate``)" +":code:`configure_fit` est chargé de configurer le prochain tour de " +"formation. Que signifie *configurer* dans ce contexte ? Configurer un " +"tour signifie sélectionner des clients et décider des instructions à leur" +" envoyer. La signature de :code:`configure_fit` l'indique clairement :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-implement-strategies.rst:231 msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in :code:`configure_fit`:" msgstr "" -"La méthode ``Stratégie`` `évaluer`` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre :" +"La valeur de retour est une liste de tuples, chacun représentant les " +"instructions qui seront envoyées à un client particulier. 
Les " +"implémentations de stratégies effectuent généralement les étapes " +"suivantes dans :code:`configure_fit` :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-implement-strategies.rst:233 +#: ../../source/how-to-implement-strategies.rst:280 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Use the :code:`client_manager` to randomly sample all (or a subset of) " +"available clients (each represented as a :code:`ClientProxy` object)" msgstr "" -"Flower 0.19 : ``def evaluate(self, parameters : Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]]:``" +"Utilise le :code:`client_manager` pour échantillonner au hasard tous les " +"clients disponibles (ou un sous-ensemble d'entre eux) (chacun représenté " +"par un objet :code:`ClientProxy`)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-implement-strategies.rst:234 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " +"current global model :code:`parameters` and :code:`config` dict" msgstr "" -"Flower 1.0 : ``def evaluate(self, server_round : int, parameters : " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "Améliorations facultatives" +"Associe chaque :code:`ClientProxy` au même :code:`FitIns` contenant le " +"modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-implement-strategies.rst:236 +#, fuzzy msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"More sophisticated implementations can use :code:`configure_fit` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_fit`." msgstr "" -"En plus des changements nécessaires mentionnés ci-dessus, il existe un " -"certain nombre d'améliorations potentielles qui viennent d'être rendues " -"possibles :" +"Les implémentations plus sophistiquées peuvent utiliser " +":code:`configure_fit` pour mettre en œuvre une logique de sélection des " +"clients personnalisée. Un client ne participera à un tour que si le " +":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " +":code:`configure_fit`." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-implement-strategies.rst:240 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." msgstr "" -"Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " -"de ``NumPyClient``. 
Si tu utilises, par exemple, l'évaluation côté " -"serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " -"sont plus nécessaires." +"La structure de cette valeur de retour offre beaucoup de souplesse à " +"l'utilisateur. Comme les instructions sont définies par client, des " +"instructions différentes peuvent être envoyées à chaque client, ce qui " +"permet d'élaborer des stratégies personnalisées pour former, par exemple," +" différents modèles sur différents clients, ou utiliser différents " +"hyperparamètres sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-implement-strategies.rst:243 +msgid "The :code:`aggregate_fit` method" +msgstr "La méthode :code:`aggregate_fit` (agrégation)" + +#: ../../source/how-to-implement-strategies.rst:245 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +":code:`aggregate_fit` is responsible for aggregating the results returned" +" by the clients that were selected and asked to train in " +":code:`configure_fit`." msgstr "" -"Configurez le délai d'attente de la ronde via ``start_simulation`` : " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +":code:`aggregate_fit` est chargé d'agréger les résultats renvoyés par les" +" clients qui ont été sélectionnés et à qui on a demandé de s'entraîner " +"dans :code:`configure_fit`." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "Aide supplémentaire" +#: ../../source/how-to-implement-strategies.rst:258 +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " +"of :code:`results`, but also a list of :code:`failures`." +msgstr "" +"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " +"que le serveur obtienne des résultats de tous les clients auxquels il a " +"envoyé des instructions (via :code:`configure_fit`). " +":code:`aggregate_fit` reçoit donc une liste de :code:`résultats`, mais " +"aussi une liste de :code:`échecs`." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-implement-strategies.rst:260 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" +" dictionary of aggregated metrics. The :code:`Parameters` return value is" +" optional because :code:`aggregate_fit` might decide that the results " +"provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" -"La plupart des `exemples de code Flower officiels " -"`_ sont déjà mis à " -"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " -"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " -"Flower `_ et utilise le canal " -"``#questions``." +":code:`aggregate_fit` renvoie un objet :code:`Parameters` facultatif et " +"un dictionnaire de métriques agrégées. 
La valeur de retour " +":code:`Parameters` est facultative car :code:`aggregate_fit` peut décider" +" que les résultats fournis ne sont pas suffisants pour l'agrégation (par " +"exemple, trop d'échecs)." -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -#, fuzzy -msgid "Upgrade to Flower Next" -msgstr "Passe à Flower 1.0" +#: ../../source/how-to-implement-strategies.rst:263 +msgid "The :code:`configure_evaluate` method" +msgstr "La méthode :code:`configure_evaluate` (en anglais)" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-implement-strategies.rst:265 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +":code:`configure_evaluate` is responsible for configuring the upcoming " +"round of evaluation. What does *configure* mean in this context? " +"Configuring a round means selecting clients and deciding what " +"instructions to send to these clients. The signature of " +":code:`configure_evaluate` makes this clear:" msgstr "" +":code:`configure_evaluate` est chargé de configurer le prochain tour " +"d'évaluation. Que signifie *configurer* dans ce contexte ? Configurer un " +"tour signifie sélectionner des clients et décider des instructions à leur" +" envoyer. La signature de :code:`configure_evaluate` l'indique clairement" +" :" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-implement-strategies.rst:278 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in :code:`configure_evaluate`:" msgstr "" +"La valeur de retour est une liste de tuples, chacun représentant les " +"instructions qui seront envoyées à un client particulier. Les " +"implémentations de stratégies effectuent généralement les étapes " +"suivantes dans :code:`configure_evaluate` :" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" +#: ../../source/how-to-implement-strategies.rst:281 +msgid "" +"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " +"the current global model :code:`parameters` and :code:`config` dict" msgstr "" +"Associe chaque :code:`ClientProxy` au même :code:`EvaluateIns` contenant " +"le modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-implement-strategies.rst:283 #, fuzzy msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" +"More sophisticated implementations can use :code:`configure_evaluate` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_evaluate`." 
msgstr "" -"Voici comment mettre à jour une installation existante vers Flower 1.0 en" -" utilisant soit pip soit Poetry :" +"Les implémentations plus sophistiquées peuvent utiliser " +":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " +"des clients personnalisée. Un client ne participera à un tour que si le " +":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " +":code:`configure_evaluate`." -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" +#: ../../source/how-to-implement-strategies.rst:287 +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." msgstr "" +"La structure de cette valeur de retour offre beaucoup de souplesse à " +"l'utilisateur. Comme les instructions sont définies par client, des " +"instructions différentes peuvent être envoyées à chaque client. Cela " +"permet aux stratégies personnalisées d'évaluer, par exemple, différents " +"modèles sur différents clients, ou d'utiliser différents hyperparamètres " +"sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-implement-strategies.rst:291 +msgid "The :code:`aggregate_evaluate` method" +msgstr "La méthode :code:`aggregate_evaluate` (agréger_évaluer)" + +#: ../../source/how-to-implement-strategies.rst:293 msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" +":code:`aggregate_evaluate` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +":code:`configure_evaluate`." msgstr "" +":code:`aggregate_evaluate` est chargé d'agréger les résultats renvoyés " +"par les clients qui ont été sélectionnés et à qui l'on a demandé " +"d'évaluer dans :code:`configure_evaluate`." -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" +#: ../../source/how-to-implement-strategies.rst:306 +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " +"receives a list of :code:`results`, but also a list of :code:`failures`." msgstr "" +"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " +"que le serveur obtienne des résultats de tous les clients auxquels il a " +"envoyé des instructions (via :code:`configure_evaluate`). " +":code:`aggregate_evaluate` reçoit donc une liste de :code:`résultats`, " +"mais aussi une liste d' :code:`échecs`." -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -#, fuzzy -msgid "Using Poetry" -msgstr "Utiliser la poésie (recommandé)" - -#: ../../source/how-to-upgrade-to-flower-next.rst:84 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:308 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" +" dictionary of aggregated metrics. 
The :code:`float` return value is " +"optional because :code:`aggregate_evaluate` might decide that the results" +" provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" -"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " -"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " -"poetry.lock`` avant d'exécuter ``poetry install``)." +":code:`aggregate_evaluate` renvoie un :code:`float` facultatif (perte) et" +" un dictionnaire de mesures agrégées. La valeur de retour :code:`float` " +"est facultative car :code:`aggregate_evaluate` peut décider que les " +"résultats fournis ne sont pas suffisants pour l'agrégation (par exemple, " +"trop d'échecs)." -#: ../../source/how-to-upgrade-to-flower-next.rst:86 -#, fuzzy -msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." +#: ../../source/how-to-implement-strategies.rst:311 +msgid "The :code:`evaluate` method" +msgstr "La méthode :code:`évaluer`" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-implement-strategies.rst:313 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +":code:`evaluate` is responsible for evaluating model parameters on the " +"server-side. Having :code:`evaluate` in addition to " +":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " +"to perform both servers-side and client-side (federated) evaluation." msgstr "" +"le fait d'avoir :code:`evaluate` en plus de " +":code:`configure_evaluate`/:code:`aggregate_evaluate` permet aux " +"stratégies d'effectuer des évaluations à la fois côté serveur et côté " +"client (fédéré)." -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -#, fuzzy -msgid "|clientapp_link|_" -msgstr "client" - -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-implement-strategies.rst:323 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +":code:`evaluate` method might not complete successfully (e.g., it might " +"fail to load the server-side evaluation data)." msgstr "" +"La valeur de retour est à nouveau facultative parce que la stratégie peut" +" ne pas avoir besoin de mettre en œuvre l'évaluation côté serveur ou " +"parce que la méthode :code:`evaluate` définie par l'utilisateur peut ne " +"pas se terminer avec succès (par exemple, elle peut échouer à charger les" +" données de l'évaluation côté serveur)." 
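For the common case of adding server-side evaluation to a built-in strategy, :code:`FedAvg` accepts an :code:`evaluate_fn` callable with the signature sketched below. The body shown here is only a stand-in that computes a dummy loss; a real implementation would load the received parameters into a model and score it on data held by the server.

.. code-block:: python

    from typing import Dict, Optional, Tuple

    import numpy as np
    import flwr as fl
    from flwr.common import NDArrays, Scalar


    def evaluate_fn(
        server_round: int, parameters: NDArrays, config: Dict[str, Scalar]
    ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
        # Stand-in "loss": mean L2 norm of the received layers. Replace this
        # with real model evaluation on server-side data.
        loss = float(np.mean([np.linalg.norm(layer) for layer in parameters]))
        return loss, {"num_layers": len(parameters)}


    strategy = fl.server.strategy.FedAvg(evaluate_fn=evaluate_fn)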
-#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-install-flower.rst:2 #, fuzzy -msgid "|serverapp_link|_" -msgstr "serveur" +msgid "Install Flower" +msgstr "Installer Flower" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 -msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" -msgstr "" +#: ../../source/how-to-install-flower.rst:6 +#, fuzzy +msgid "Python version" +msgstr "Version Python" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" -msgstr "" +#: ../../source/how-to-install-flower.rst:12 +msgid "Install stable release" +msgstr "Installe la version stable" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 -msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +#: ../../source/how-to-install-flower.rst:15 +#: ../../source/how-to-upgrade-to-flower-next.rst:46 +msgid "Using pip" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-install-flower.rst:17 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"Stable releases are available on `PyPI " +"`_::" msgstr "" +"Les versions stables sont disponibles sur `PyPI " +"`_: :" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-install-flower.rst:21 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra::" msgstr "" +"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr`` " +"doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -#, fuzzy -msgid "Simulation in CLI" -msgstr "Simulation de moniteur" +#: ../../source/how-to-install-flower.rst:27 +msgid "Using conda (or mamba)" +msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 -msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +#: ../../source/how-to-install-flower.rst:29 +msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-install-flower.rst:31 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following::" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-install-flower.rst:36 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. 
Here's an example:" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``::" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" +#: ../../source/how-to-install-flower.rst:40 +msgid "or with ``mamba``::" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 -msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" -msgstr "" +#: ../../source/how-to-install-flower.rst:46 +msgid "Verify installation" +msgstr "Vérifie l'installation" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-install-flower.rst:48 #, fuzzy msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" msgstr "" -"La plupart des `exemples de code Flower officiels " -"`_ sont déjà mis à " -"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " -"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " -"Flower `_ et utilise le canal " -"``#questions``." +"La commande suivante peut être utilisée pour vérifier si Flower a été " +"installé avec succès. Si tout a fonctionné, la version de Flower devrait " +"être imprimée sur la ligne de commande: :" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -#, fuzzy -msgid "Important" -msgstr "Changements importants :" +#: ../../source/how-to-install-flower.rst:58 +msgid "Advanced installation options" +msgstr "Options d'installation avancées" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 -msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" -msgstr "" +#: ../../source/how-to-install-flower.rst:61 +#, fuzzy +msgid "Install via Docker" +msgstr "Installer Flower" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" +#: ../../source/how-to-install-flower.rst:63 +msgid ":doc:`Run Flower using Docker `" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" -msgstr "" +#: ../../source/how-to-install-flower.rst:66 +msgid "Install pre-release" +msgstr "Installer la version pre-release" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-install-flower.rst:68 msgid "" -"**Note: This tutorial covers experimental features. 
The functionality and" -" interfaces may change in future versions.**" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens::" msgstr "" +"Les nouvelles versions (éventuellement instables) de Flower sont parfois " +"disponibles en tant que versions préliminaires (alpha, bêta, release " +"candidate) avant que la version stable n'arrive : :" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-install-flower.rst:72 msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra::" msgstr "" +"Pour les simulations qui utilisent le moteur de client virtuel, les " +"versions de ``flwr`` doivent être installées avec l'option " +"``simulation``: :" -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" -msgstr "" +#: ../../source/how-to-install-flower.rst:77 +msgid "Install nightly release" +msgstr "Installer la version nightly" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-install-flower.rst:79 msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases::" msgstr "" +"Les dernières modifications (potentiellement instables) de Flower sont " +"disponibles sous forme de versions nocturnes: :" -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" +#: ../../source/how-to-install-flower.rst:83 +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra::" msgstr "" +"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" +"nightly`` doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:2 +#, fuzzy +msgid "Monitor simulation" +msgstr "Simulation de moniteur" -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" +#: ../../source/how-to-monitor-simulation.rst:4 +msgid "" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" +"Flower te permet de surveiller les ressources du système pendant " +"l'exécution de ta simulation. De plus, le moteur de simulation de Flower " +"est puissant et te permet de décider comment allouer les ressources par " +"manière de client et de limiter l'utilisation totale. Les informations " +"sur la consommation des ressources peuvent t'aider à prendre des " +"décisions plus intelligentes et à accélérer le temps d'exécution." -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. 
Import the required mods" +#: ../../source/how-to-monitor-simulation.rst:6 +msgid "" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" +"Les instructions spécifiques supposent que tu utilises macOS et que le " +"gestionnaire de paquets `Homebrew `_ est installé." -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:10 +msgid "Downloads" +msgstr "Téléchargements" -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" +#: ../../source/how-to-monitor-simulation.rst:16 +msgid "" +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" +"`Prometheus `_ est utilisé pour la collecte de " +"données, tandis que `Grafana `_ te permettra de " +"visualiser les données collectées. Ils sont tous deux bien intégrés à " +"`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-monitor-simulation.rst:18 msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." msgstr "" +"Écrase les fichiers de configuration (selon ton appareil, il se peut " +"qu'il soit installé sur un chemin différent)." -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. Create the ``ClientApp`` with mods" +#: ../../source/how-to-monitor-simulation.rst:20 +msgid "If you are on an M1 Mac, it should be:" +msgstr "Si tu es sur un Mac M1, il devrait l'être :" + +#: ../../source/how-to-monitor-simulation.rst:27 +msgid "On the previous generation Intel Mac devices, it should be:" msgstr "" +"Sur les appareils Mac Intel de la génération précédente, ce devrait être " +"le cas :" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-monitor-simulation.rst:34 msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" msgstr "" +"Ouvre les fichiers de configuration respectifs et modifie-les. Selon ton " +"appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/how-to-use-built-in-mods.rst:72 -#, fuzzy -msgid "Order of execution" -msgstr "Dépréciations" - -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-monitor-simulation.rst:44 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" msgstr "" +"puis supprime tout le texte du fichier et colle une nouvelle " +"configuration Prometheus que tu vois ci-dessous. Tu peux adapter les " +"intervalles de temps à tes besoins :" -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" +#: ../../source/how-to-monitor-simulation.rst:59 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. 
Open those using one of the following " +"commands as before:" msgstr "" +"Maintenant, après avoir édité la configuration de Prometheus, fais de " +"même avec les fichiers de configuration de Grafana. Ouvre ces derniers à " +"l'aide de l'une des commandes suivantes, comme précédemment :" -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" +#: ../../source/how-to-monitor-simulation.rst:69 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." msgstr "" +"Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " +"configuration suivante comme précédemment." -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-monitor-simulation.rst:84 msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." msgstr "" +"Félicitations, tu viens de télécharger tous les logiciels nécessaires au " +"suivi des métriques, maintenant, démarrons-le." -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:88 +msgid "Tracking metrics" +msgstr "Suivi des mesures" -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" +#: ../../source/how-to-monitor-simulation.rst:90 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." msgstr "" +"Avant de lancer ta simulation Flower, tu dois démarrer les outils de " +"surveillance que tu viens d'installer et de configurer." -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-monitor-simulation.rst:97 msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." +"Please include the following argument in your Python code when starting a" +" simulation." msgstr "" +"Tu dois inclure l'argument suivant dans ton code Python lorsque tu " +"démarres une simulation." -#: ../../source/how-to-use-built-in-mods.rst:87 -msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "Now, you are ready to start your workload." +msgstr "Maintenant, tu es prêt à commencer ta charge de travail." -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +#: ../../source/how-to-monitor-simulation.rst:110 +msgid "" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" msgstr "" +"Peu de temps après le début de la simulation, tu devrais voir les " +"journaux suivants dans ton terminal :" -#: ../../source/how-to-use-differential-privacy.rst:2 -#, fuzzy -msgid "Use Differential Privacy" -msgstr "Confidentialité différentielle" +#: ../../source/how-to-monitor-simulation.rst:117 +msgid "You can look at everything at ``_ ." +msgstr "Tu peux tout regarder sur ``_ ." 
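A small, self-contained sketch of starting a simulation with the Ray dashboard enabled so that the Prometheus and Grafana setup above has metrics to scrape. The :code:`DummyClient` exists only to make the snippet runnable, :code:`include_dashboard` is a standard ``ray.init`` option forwarded through :code:`ray_init_args`, and the resource limits anticipate the per-client allocation discussed further below. Requires ``flwr`` installed with the ``simulation`` extra.

.. code-block:: python

    import numpy as np
    import flwr as fl
    from flwr.client import NumPyClient


    class DummyClient(NumPyClient):
        """Stand-in client so the sketch runs; replace with your own client."""

        def get_parameters(self, config):
            return [np.zeros(3)]

        def fit(self, parameters, config):
            return parameters, 1, {}

        def evaluate(self, parameters, config):
            return 0.0, 1, {}


    def client_fn(cid: str):
        return DummyClient().to_client()


    history = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=2,
        config=fl.server.ServerConfig(num_rounds=1),
        # Ask Ray to expose its dashboard and metrics endpoints
        ray_init_args={"include_dashboard": True},
        # Per-client resource caps (see the resource allocation section below)
        client_resources={"num_cpus": 1, "num_gpus": 0.0},
    )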
-#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-monitor-simulation.rst:119 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." msgstr "" +"Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" +" panneau de gauche, l'option la plus basse)." -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" +"Ou alors, tu peux simplement les voir dans Grafana en cliquant sur le " +"coin supérieur droit, \"View in Grafana\". Sache que le tableau de bord " +"Ray n'est accessible que pendant la simulation. Une fois la simulation " +"terminée, tu ne peux utiliser Grafana que pour explorer les métriques. Tu" +" peux démarrer Grafana en te rendant sur `http://localhost:3000/``." -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-monitor-simulation.rst:123 msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port :code:`3000` on " +"your machine as long as they are running." msgstr "" +"Après avoir terminé la visualisation, arrête Prometheus et Grafana. C'est" +" important car sinon ils bloqueront, par exemple, le port :code:`3000` " +"sur ta machine tant qu'ils seront en cours d'exécution." -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "Resource allocation" +msgstr "Allocation des ressources" + +#: ../../source/how-to-monitor-simulation.rst:134 msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." msgstr "" +"Tu dois comprendre le fonctionnement de la bibliothèque Ray pour allouer " +"efficacement les ressources du système aux clients de simulation de ton " +"côté." 
-#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-monitor-simulation.rst:136 msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" msgstr "" +"Au départ, la simulation (que Ray gère sous le capot) démarre par défaut " +"avec toutes les ressources disponibles sur le système, qu'elle partage " +"entre les clients. Cela ne signifie pas qu'elle les divise de manière " +"égale entre tous, ni que l'apprentissage du modèle se fait sur tous les " +"clients simultanément. Tu en apprendras plus à ce sujet dans la suite de " +"ce blog. Tu peux vérifier les ressources du système en exécutant ce qui " +"suit :" -#: ../../source/how-to-use-differential-privacy.rst:21 -#, fuzzy -msgid "Server-side Clipping" -msgstr "Logique côté serveur" +#: ../../source/how-to-monitor-simulation.rst:143 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-monitor-simulation.rst:155 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" msgstr "" +"Cependant, tu peux écraser les valeurs par défaut. Lorsque tu démarres " +"une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " +"toutes) :" -#: ../../source/how-to-use-differential-privacy.rst:-1 -#, fuzzy -msgid "server side clipping" -msgstr "Logique côté serveur" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "Let’s also specify the resource for a single client." +msgstr "Spécifions également la ressource pour un seul client." -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-monitor-simulation.rst:205 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" +"Ray ne démarrera un nouveau client que lorsqu'il disposera de toutes les " +"ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" +" lorsque les ressources le permettront." 
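To make the resource-override entries above concrete, here is a sketch of a ``start_simulation`` call that caps what Ray may use (``ray_init_args``) and reserves a slice of it per virtual client (``client_resources``). The numbers are placeholders, and ``client_fn`` stands for your own client factory (for example the one in the earlier sketch):

    import flwr as fl

    fl.simulation.start_simulation(
        client_fn=client_fn,  # your client factory, e.g. as in the sketch above
        num_clients=100,
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=fl.server.strategy.FedAvg(),
        # Limit what the whole simulation may use (you don't have to set all keys)
        ray_init_args={"num_cpus": 8, "num_gpus": 1},
        # Resources reserved per virtual client; with num_gpus=0.5 two clients
        # can share one GPU and therefore run concurrently.
        client_resources={"num_cpus": 2, "num_gpus": 0.5},
    )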
-#: ../../source/how-to-use-differential-privacy.rst:52 -#, fuzzy -msgid "Client-side Clipping" -msgstr "Logique côté client" - -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-monitor-simulation.rst:207 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " +"running two clients and therefore enable them to run concurrently. Be " +"careful not to require more resources than available. If you specified " +":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " +"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." msgstr "" +"Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " +"ne fonctionneront pas simultanément. En définissant " +":code:`client_num_gpus = 0.5`, tu pourras exécuter deux clients et donc " +"les faire fonctionner simultanément. Fais attention à ne pas demander " +"plus de ressources que celles disponibles. Si tu as spécifié " +":code:`client_num_gpus = 2`, la simulation ne démarrera pas (même si tu " +"as 2 GPU mais que tu as décidé d'en définir 1 dans " +":code:`ray_init_args`)." -#: ../../source/how-to-use-differential-privacy.rst:-1 -#, fuzzy -msgid "client side clipping" -msgstr "Logique côté client" +#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "FAQ" -#: ../../source/how-to-use-differential-privacy.rst:63 -msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:214 +msgid "Q: I don't see any metrics logged." +msgstr "Q : Je ne vois aucune mesure enregistrée." -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-monitor-simulation.rst:216 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." msgstr "" +"R : Il se peut que le délai ne soit pas correctement défini. Le paramètre" +" se trouve dans le coin supérieur droit (\"Dernières 30 minutes\" par " +"défaut). Modifie le délai pour qu'il corresponde à la période pendant " +"laquelle la simulation s'est déroulée." -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-monitor-simulation.rst:218 msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. 
The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" +"Q : Je vois s'afficher \"Serveur Grafana non détecté. Vérifie que le " +"serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" +" l'onglet Métriques dans Ray Dashboard." -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" +#: ../../source/how-to-monitor-simulation.rst:220 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" msgstr "" +"R : Grafana n'est probablement pas en cours d'exécution. Vérifie les " +"services en cours d'exécution" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-monitor-simulation.rst:226 +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"``_." msgstr "" +"Q : Je vois \"This site can't be reached\" quand je vais sur " +"``_." -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-monitor-simulation.rst:228 msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." msgstr "" +"R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " +"Prometheus." -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" -msgstr "" +#: ../../source/how-to-monitor-simulation.rst:232 +msgid "Resources" +msgstr "Ressources" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-monitor-simulation.rst:234 +#, fuzzy msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"Ray Dashboard: ``_" msgstr "" +"Tableau de bord Ray : ``_" -#: ../../source/how-to-use-strategies.rst:2 +#: ../../source/how-to-monitor-simulation.rst:236 #, fuzzy -msgid "Use strategies" -msgstr "Stratégies personnalisées" +msgid "Ray Metrics: ``_" +msgstr "" +"Ray Metrics : ``_" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-run-simulations.rst:2 +#, fuzzy +msgid "Run simulations" +msgstr "Simulation de moniteur" + +#: ../../source/how-to-run-simulations.rst:8 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." 
+"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." msgstr "" -"Flower permet une personnalisation complète du processus d'apprentissage " -"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " -"intégrées sont fournies dans le cadre principal." -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-run-simulations.rst:10 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" +" clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +":code:`VirtualClientEngine` are:" msgstr "" -"Il y a trois façons de personnaliser la manière dont Flower orchestre le " -"processus d'apprentissage du côté du serveur :" - -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" - -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "Personnalise une stratégie existante avec des fonctions de rappel" - -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "Mets en place une nouvelle stratégie" - -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "Utilise une stratégie existante" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-run-simulations.rst:12 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." msgstr "" -"Flower intègre un certain nombre de stratégies d'apprentissage fédéré " -"populaires. Une stratégie intégrée peut être instanciée comme suit :" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-run-simulations.rst:13 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. 
It is usually " -"recommended to adjust a few parameters during instantiation:" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +"internals." msgstr "" -"Cela crée une stratégie dont tous les paramètres sont laissés à leur " -"valeur par défaut et la transmet à la fonction :code:`start_server`. Il " -"est généralement recommandé d'ajuster quelques paramètres lors de " -"l'instanciation :" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-run-simulations.rst:14 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" -"Les stratégies existantes offrent plusieurs façons de personnaliser leur " -"comportement. Les fonctions de rappel permettent aux stratégies d'appeler" -" le code fourni par l'utilisateur pendant l'exécution." - -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "Configurer l'adaptation et l'évaluation du client" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-run-simulations.rst:16 msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " +"of `Actors `_ to " +"spawn `virtual` clients and run their workload." msgstr "" -"Le serveur peut transmettre de nouvelles valeurs de configuration au " -"client à chaque tour en fournissant une fonction à " -":code:`on_fit_config_fn`. La fonction fournie sera appelée par la " -"stratégie et doit renvoyer un dictionnaire de paires de valeurs de clés " -"de configuration qui seront envoyées au client. Elle doit renvoyer un " -"dictionnaire de valeurs de configuration arbitraires :code:`client.fit` " -"et :code:`client.evaluate` au cours de chaque tour d'apprentissage " -"fédéré." -#: ../../source/how-to-use-strategies.rst:75 -msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +#: ../../source/how-to-run-simulations.rst:20 +msgid "Launch your Flower simulation" msgstr "" -"Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de " -"configuration arbitraires du serveur au client, et changer poétiquement " -"ces valeurs à chaque tour, par exemple pour ajuster le taux " -"d'apprentissage. 
Le client recevra le dictionnaire renvoyé par le " -":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`." -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-run-simulations.rst:22 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" -"Comme pour :code:`on_fit_config_fn`, il existe aussi " -":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée" -" à :code:`client.evaluate()`" -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "Configuration de l'évaluation côté serveur" +#: ../../source/how-to-run-simulations.rst:44 +#, fuzzy +msgid "VirtualClientEngine resources" +msgstr "Moteur de client virtuel" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-run-simulations.rst:45 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +":code:`ray_init_args` input argument to :code:`start_simulation` which " +"the VCE internally passes to Ray's :code:`ray.init` command. For a " +"complete list of settings you can configure check the `ray.init " +"`_" +" documentation. Do not set :code:`ray_init_args` if you want the VCE to " +"use all your system's CPUs and GPUs." msgstr "" -"L'évaluation côté serveur peut être activée en passant une fonction " -"d'évaluation à :code:`evaluate_fn`." -#: ../../source/how-to-use-strategies.rst:89 -#, fuzzy -msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +#: ../../source/how-to-run-simulations.rst:62 +msgid "Assigning client resources" msgstr "" -"L'écriture d'une stratégie entièrement personnalisée est un peu plus " -"complexe, mais c'est celle qui offre le plus de souplesse. Lis le guide " -"`Implémentation des stratégies `_ pour " -"en savoir plus." -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "Tutoriel" +#: ../../source/how-to-run-simulations.rst:63 +msgid "" +"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" +" nothing else) to each virtual client. This means that if your system has" +" 10 cores, that many virtual clients can be concurrently running." +msgstr "" -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "Quickstart tutorials" +#: ../../source/how-to-run-simulations.rst:65 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." 
+" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" +msgstr "" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "Guides" +#: ../../source/how-to-run-simulations.rst:67 +msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +msgstr "" -#: ../../source/index.rst:99 -msgid "Legacy example guides" +#: ../../source/how-to-run-simulations.rst:68 +msgid "" +":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " +"assigned." msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "Explications" +#: ../../source/how-to-run-simulations.rst:70 +msgid "Let's see a few examples:" +msgstr "" -#: None:-1 -msgid "API reference" -msgstr "Référence pour l'API" +#: ../../source/how-to-run-simulations.rst:89 +msgid "" +"While the :code:`client_resources` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +":code:`VirtualClientEngine` will schedule 100 jobs to run (each " +"simulating a client sampled by the strategy) and then will execute them " +"in a resource-aware manner in batches of 8." +msgstr "" -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "Référence pour la documentation" +#: ../../source/how-to-run-simulations.rst:91 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." +msgstr "" -#: ../../source/index.rst:153 +#: ../../source/how-to-run-simulations.rst:94 #, fuzzy -msgid "Contributor tutorials" -msgstr "Configuration du contributeur" +msgid "Simulation examples" +msgstr "Exemples de PyTorch" -#: ../../source/index.rst:160 -#, fuzzy -msgid "Contributor how-to guides" -msgstr "Guide pour les contributeurs" - -#: ../../source/index.rst:172 -#, fuzzy -msgid "Contributor explanations" -msgstr "Explications" - -#: ../../source/index.rst:178 -#, fuzzy -msgid "Contributor references" -msgstr "Configuration du contributeur" - -#: ../../source/index.rst:-1 +#: ../../source/how-to-run-simulations.rst:96 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" msgstr "" -#: ../../source/index.rst:2 +#: ../../source/how-to-run-simulations.rst:98 #, fuzzy -msgid "Flower Framework Documentation" -msgstr "Rédiger de la documentation" +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." +msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/index.rst:7 +#: ../../source/how-to-run-simulations.rst:99 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" -"Bienvenue sur la documentation de Flower. `Flower `_ " -"est un framework de federated learning convivial et facile à utiliser." 
-#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "Rejoignez la communauté de Flower" +#: ../../source/how-to-run-simulations.rst:104 +#, fuzzy +msgid "Multi-node Flower simulations" +msgstr "Simulation de moniteur" -#: ../../source/index.rst:13 +#: ../../source/how-to-run-simulations.rst:106 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " +"across multiple compute nodes. Before starting your multi-node simulation" +" ensure that you:" msgstr "" -"Le communauté de Flower s'agrandit rapidement - on est un super groupe de" -" chercheurs, ingénieurs, étudiants, professionnels, académiques, et " -"autres hobbyistes." -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "Join us on Slack" +#: ../../source/how-to-run-simulations.rst:108 +msgid "Have the same Python environment in all nodes." +msgstr "" -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower Framework" +#: ../../source/how-to-run-simulations.rst:109 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +msgstr "" -#: ../../source/index.rst:25 +#: ../../source/how-to-run-simulations.rst:110 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -"Ce guide utilisateur s'adresse à des chercheurs et des développeurs qui " -"veulent utiliser Flower pour transposer des workloads de Machine Learning" -" existantes dans un scenario fédéré. Un des buts de Flower est de rendre " -"cela le plus evident possible. Lisez la suite pour en apprendre plus." - -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "Tutoriels" -#: ../../source/index.rst:32 +#: ../../source/how-to-run-simulations.rst:111 msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " +"`_ so the " +":code:`VirtualClientEngine` attaches to a running Ray instance." msgstr "" -"Une serie de tutoriels de Federated Learning, l'endroit parfait pour " -"débuter." -#: ../../source/index.rst:61 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:112 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" +"Start Ray on you head node: on the terminal type :code:`ray start " +"--head`. This command will print a few lines, one of which indicates how " +"to attach other nodes to the head node." 
msgstr "" -"QUICKSTART TUTORIALS: :ref:`PyTorch ` | " -":ref:`TensorFlow ` | :ref:`🤗 Transformers " -"` | :ref:`JAX ` | :ref:`Pandas " -"` | :ref:`fastai ` | :ref:`PyTorch " -"Lightning ` | :ref:`MXNet ` | :ref:`scikit-learn ` | :ref:`XGBoost " -"` | :ref:`Android ` | :ref:`iOS " -"`" -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" +#: ../../source/how-to-run-simulations.rst:113 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +":code:`ray start --address='192.168.1.132:6379'`" msgstr "" -#: ../../source/index.rst:68 -#, fuzzy -msgid "And TensorFlow:" -msgstr "Exemples de TensorFlow" - -#: ../../source/index.rst:76 +#: ../../source/how-to-run-simulations.rst:115 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." msgstr "" -"Guides orientés sur la résolutions étapes par étapes de problèmes ou " -"objectifs specifiques." -#: ../../source/index.rst:110 +#: ../../source/how-to-run-simulations.rst:117 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command :code:`ray stop` in each node's " +"terminal (including the head node)." msgstr "" -"Guides orientés sur la compréhension et l'explication des sujets et idées" -" de fonds sur lesquels sont construits Flower et l'IA collaborative." - -#: ../../source/index.rst:120 -#, fuzzy -msgid "References" -msgstr "Référence" - -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "Référence de l'API orientée sur l'information pure." -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" +#: ../../source/how-to-run-simulations.rst:120 +msgid "Multi-node simulation good-to-know" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." +#: ../../source/how-to-run-simulations.rst:122 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" msgstr "" -#: ../../source/index.rst:148 -#, fuzzy -msgid "Contributor docs" -msgstr "Configuration du contributeur" - -#: ../../source/index.rst:150 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:124 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." +"User :code:`ray status` to check all nodes connected to your head node as" +" well as the total resources available to the " +":code:`VirtualClientEngine`." msgstr "" -"Les auteurs de Flower sont heureux d'accueillir des contributions " -"externes. Les guides suivant sont là pour vous accompagner dans cette " -"direction." -#: ../../source/ref-api-cli.rst:2 -#, fuzzy -msgid "Flower CLI reference" -msgstr "Client de Flower" +#: ../../source/how-to-run-simulations.rst:126 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +":code:`VirtualClientEngine` can schedule as many `virtual` clients as " +"that node can possible run. In some settings you might want to exclude " +"certain resources from the simulation. 
You can do this by appending " +"`--num-cpus=` and/or `--num-" +"gpus=` in any :code:`ray start` command (including " +"when starting the head)" +msgstr "" -#: ../../source/ref-api-cli.rst:7 +#: ../../source/how-to-run-simulations.rst:132 #, fuzzy -msgid "flower-simulation" +msgid "Considerations for simulations" msgstr "Simulation de moniteur" -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower-superlink" - -#: ../../source/ref-api-cli.rst:27 -#, fuzzy -msgid "flower-client-app" -msgstr "Flower ClientApp." - -#: ../../source/ref-api-cli.rst:37 -#, fuzzy -msgid "flower-server-app" -msgstr "flower-driver-api" - -#: ../../source/ref-api/flwr.rst:2 -#, fuzzy -msgid "flwr" -msgstr "Fleur" - -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 -msgid "Modules" +#: ../../source/how-to-run-simulations.rst:135 +msgid "" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" +#: ../../source/how-to-run-simulations.rst:138 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +#: ../../source/how-to-run-simulations.rst:141 #, fuzzy -msgid "Flower client." -msgstr "Client de Flower" +msgid "GPU resources" +msgstr "Ressources" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" +#: ../../source/how-to-run-simulations.rst:143 +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." -msgstr "Composants communs partagés entre le serveur et le client." - -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" +#: ../../source/how-to-run-simulations.rst:146 +msgid "" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set :code:`num_gpus=0.5` and you have two GPUs in your system with " +"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" +" concurrently." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of -#, fuzzy -msgid "Flower server." -msgstr "Serveur de Flower" +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. 
Two takeaways from this are:" +msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-run-simulations.rst:149 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -#, fuzzy -msgid "Flower simulation." -msgstr "Simulation de moniteur" +#: ../../source/how-to-run-simulations.rst:150 +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " +"experiment." +msgstr "" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "client" +#: ../../source/how-to-run-simulations.rst:153 +msgid "" +"In addition, the GPU resource limits passed to :code:`client_resources` " +"are not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." +msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 +#: ../../source/how-to-run-simulations.rst:156 #, fuzzy -msgid "Functions" -msgstr "Les quatre fonctions :" +msgid "TensorFlow with GPUs" +msgstr "Exemples de TensorFlow" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:158 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -#, fuzzy -msgid "Run Flower client app." -msgstr "Client de Flower" +#: ../../source/how-to-run-simulations.rst:160 +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " +"in order to specify a function to be executed upon actor initialization. " +"In this case, to enable GPU growth for TF workloads. It would look as " +"follows:" +msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:179 #, fuzzy -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." +msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -#, fuzzy -msgid "Run Flower SuperNode." 
-msgstr "Serveur de Flower" +#: ../../source/how-to-run-simulations.rst:183 +msgid "Multi-node setups" +msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:185 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +#, fuzzy +msgid "Save and load model checkpoints" +msgstr "Sauvegarde et chargement des points de contrôle PyTorch" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +#, fuzzy +msgid "Model checkpointing" +msgstr "Point de contrôle du modèle" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +msgid "" +"Model updates can be persisted on the server-side by customizing " +":code:`Strategy` methods. Implementing custom strategies is always an " +"option, but for many cases it may be more convenient to simply customize " +"an existing strategy. The following code example defines a new " +":code:`SaveModelStrategy` which customized the existing built-in " +":code:`FedAvg` strategy. In particular, it customizes " +":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " +"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" +" before it returns those aggregated weights to the caller (i.e., the " +"server):" msgstr "" +"Les mises à jour du modèle peuvent être conservées côté serveur en " +"personnalisant les méthodes :code:`Strategy`. L'implémentation de " +"stratégies personnalisées est toujours possible, mais dans de nombreux " +"cas, il peut être plus pratique de simplement personnaliser une stratégie" +" existante. 
L'exemple de code suivant définit une nouvelle " +":code:`SaveModelStrategy` qui personnalise la stratégie intégrée " +":code:`FedAvg` existante. En particulier, il personnalise " +":code:`aggregate_fit` en appelant :code:`aggregate_fit` dans la classe de" +" base (:code:`FedAvg`). Il continue ensuite à sauvegarder les poids " +"retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " +"(c'est-à-dire le serveur) :" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +#, fuzzy +msgid "Save and load PyTorch checkpoints" +msgstr "Sauvegarde et chargement des points de contrôle PyTorch" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#, fuzzy +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" +"Comme dans l'exemple précédent, mais avec quelques étapes " +"supplémentaires, nous allons montrer comment stocker un point de contrôle" +" PyTorch en utilisant la fonction ``torch.save``. Tout d'abord, " +"``aggregate_fit`` renvoie un objet ``Parameters`` qui doit être " +"transformé en une liste de `ndarray`` NumPy, puis ceux-ci sont " +"transformés en ``state_dict`` PyTorch en suivant la structure de la " +"classe ``OrderedDict``." -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" +"Pour charger ta progression, il te suffit d'ajouter les lignes suivantes " +"à ton code. Note que cela va itérer sur tous les points de contrôle " +"sauvegardés et charger le plus récent :" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "Passe à Flower 1.0" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" +"Flower 1.0 est arrivé. En plus de nouvelles fonctionnalités, Flower 1.0 " +"fournit une base stable pour la croissance future. 
Par rapport à Flower " +"0.19 (et aux autres versions de la série 0.x), il y a quelques " +"changements qui nécessitent de modifier le code des projets de la série " +"0.x existants." -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -#, fuzzy -msgid "Flower ClientApp." -msgstr "Flower ClientApp." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 +#: ../../source/how-to-upgrade-to-flower-next.rst:43 +msgid "Install update" +msgstr "Installer la mise à jour" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" msgstr "" +"Voici comment mettre à jour une installation existante vers Flower 1.0 en" +" utilisant soit pip soit Poetry :" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "pip: add ``-U`` when installing." +msgstr "pip : ajoute ``-U`` lors de l'installation." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" +"``python -m pip install -U flwr`` (lors de l'utilisation de " +"``start_server`` et ``start_client``)" -#: ../../source/ref-api/flwr.client.rst:52::1 -#, fuzzy -msgid ":py:obj:`flwr.client.mod `\\" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +msgid "" +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" +msgstr "" +"``python -m pip install -U 'flwr[simulation]'`` (lors de l'utilisation de" +" ``start_simulation``)" -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of -#, fuzzy -msgid "Flower Built-in Mods." -msgstr "Client de Flower" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +msgid "" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." +msgstr "" +"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " +"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " +"poetry.lock`` avant d'exécuter ``poetry install``)." 
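Returning to the checkpointing entries further up (``how-to-save-and-load-model-checkpoints.rst``), a minimal sketch of the ``SaveModelStrategy`` idea, assuming the NumPy-based variant (the ``.npz`` filename is illustrative; a PyTorch variant would additionally rebuild an ``OrderedDict`` state dict and call ``torch.save``):

    from typing import Dict, List, Optional, Tuple, Union

    import numpy as np
    import flwr as fl
    from flwr.common import FitRes, Parameters, Scalar, parameters_to_ndarrays
    from flwr.server.client_proxy import ClientProxy


    class SaveModelStrategy(fl.server.strategy.FedAvg):
        """FedAvg that additionally persists the aggregated weights each round."""

        def aggregate_fit(
            self,
            server_round: int,
            results: List[Tuple[ClientProxy, FitRes]],
            failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
        ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
            # Let the built-in FedAvg perform the actual aggregation
            aggregated_parameters, metrics = super().aggregate_fit(
                server_round, results, failures
            )
            if aggregated_parameters is not None:
                # Convert Parameters -> list of NumPy ndarrays and save to disk
                ndarrays = parameters_to_ndarrays(aggregated_parameters)
                np.savez(f"round-{server_round}-weights.npz", *ndarrays)
            return aggregated_parameters, metrics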
-#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" +"``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " +"``start_client``)" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +msgid "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " +"l'utilisation de ``start_simulation``)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-next.rst:100 +msgid "Required changes" +msgstr "Changements nécessaires" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +msgid "The following breaking changes require manual updates." msgstr "" +"Les changements de rupture suivants nécessitent des mises à jour " +"manuelles." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -#, fuzzy -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +msgid "General" +msgstr "Généralités" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" msgstr "" +"Passe tous les arguments comme des arguments de mots-clés (et non comme " +"des arguments de position). Voici un exemple :" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" msgstr "" +"Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," +" FlowerClient())``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +msgid "" +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" +"Fleur 1.0 (arguments de mots-clés) : " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -#, fuzzy -msgid "Get the run context from this client." -msgstr "Évaluer la réponse d'un client." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "Client" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" +"Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` " +"en ``def get_parameters(self, config):``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -#, fuzzy -msgid "Return the current local model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" - -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" +"Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en " +"``def get_parameters(self, ins : GetParametersIns):``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "Stratégies / ``démarrer_serveur`` / ``démarrer_simulation``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" msgstr "" +"Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et" +" ``start_simulation``. Voici un exemple :" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." 
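A short sketch of the two client-side changes listed above (the new ``get_parameters`` signature and the keyword-only call), assuming a ``NumPyClient`` subclass; ``.to_client()`` is used here only to hand a ``NumPyClient`` to ``start_client``, and the address mirrors the example above:

    import flwr as fl


    class FlowerClient(fl.client.NumPyClient):
        # Flower 1.0+: ``config`` is part of the signature
        def get_parameters(self, config):
            return []

        def fit(self, parameters, config):
            return parameters, 1, {}

        def evaluate(self, parameters, config):
            return 0.0, 1, {}


    # Flower 1.0+: arguments are passed as keyword arguments, not positionally
    fl.client.start_client(
        server_address="127.0.0.1:8080",
        client=FlowerClient().to_client(),
    )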
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" +"Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " +"\"round_timeout\" : 600.0}, ...)``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +msgid "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" msgstr "" +"Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " +"``config=ServerConfig(...)`` (voir point précédent)" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" +"Supprime le paramètre ``force_final_distributed_eval`` des appels à " +"``start_server``. L'évaluation distribuée sur tous les clients peut être " +"activée en configurant la stratégie pour échantillonner tous les clients " +"pour l'évaluation après le dernier tour de formation." 
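A sketch of the server-side change described above, passing a ``ServerConfig`` instead of a plain dictionary (the address and strategy are illustrative):

    import flwr as fl

    # Flower 1.0+: use ServerConfig rather than config={"num_rounds": 3, ...}
    fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=fl.server.ServerConfig(num_rounds=3, round_timeout=600.0),
        strategy=fl.server.strategy.FedAvg(),
    )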
-#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +msgid "" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" +"Initialisation de la stratégie : si la stratégie repose sur les valeurs " +"par défaut de ``fraction_fit`` et ``fraction_evaluate``, fixer " +"manuellement ``fraction_fit`` et ``fraction_evaluate`` à ``0.1``. Les " +"projets qui ne créent pas manuellement une stratégie (en appelant " +"``start_server`` ou ``start_simulation`` sans passer une instance de " +"stratégie) doivent maintenant initialiser manuellement FedAvg avec " +"``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``." -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#:
flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -#, fuzzy -msgid "Parameters" -msgstr "Paramètres du modèle." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :" -#: flwr.client.client.Client.evaluate:3 of -msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -#, fuzzy -msgid "Returns" -msgstr "Ressources" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients` --> ``min_evaluate_clients``" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:64 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." msgstr "" +"Renommez ``rnd`` en ``server_round``. Cela a un impact sur plusieurs " +"méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``," +" ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``." -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "Ajoutez ``server_round`` et ``config`` à ``evaluate_fn`` :" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 0.19 : ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training."
+"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" +"Flower 1.0 : ``def evaluate(server_round : int, parameters : NDArrays, " +"config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "Custom strategies" +msgstr "Stratégies personnalisées" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" +"Le type du paramètre ``failures`` a changé de ``List[BaseException]`` à " +"``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (dans " +"``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " +"BaseException]]`` (dans ``aggregate_evaluate``)" -#: flwr.client.client.Client.get_parameters:7 of -#, fuzzy -msgid "The current local model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" - -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" msgstr "" +"La méthode ``Stratégie`` `évaluer`` reçoit maintenant le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre :" -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +msgid "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 0.19 : ``def evaluate(self, parameters : Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -#, fuzzy -msgid "ClientApp" -msgstr "client" - -#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 1.0 : ``def evaluate(self, server_round : int, parameters : " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -#, fuzzy -msgid "Examples" -msgstr "Exemples de PyTorch" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "Optional improvements" +msgstr "Améliorations facultatives" -#: flwr.client.client_app.ClientApp:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" msgstr "" +"En plus des changements nécessaires mentionnés ci-dessus, il existe un " +"certain nombre d'améliorations potentielles qui viennent d'être rendues " +"possibles :" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 msgid "" -"If the above 
code is in a Python module called `client`, it can be " -"started as follows:" -msgstr "" - -#: flwr.client.client_app.ClientApp:21 of -msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." -msgstr "" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" +"Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " +"de ``NumPyClient``. Si tu utilises, par exemple, l'évaluation côté " +"serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " +"sont plus nécessaires." -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" +"Configurez le délai d'attente de la ronde via ``start_simulation`` : " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 +#: ../../source/how-to-upgrade-to-flower-next.rst:317 +msgid "Further help" +msgstr "Aide supplémentaire" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +msgid "" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." msgstr "" +"La plupart des `exemples de code Flower officiels " +"`_ sont déjà mis à " +"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " +"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " +"Flower `_ et utilise le canal " +"``#questions``." -#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:2 #, fuzzy -msgid ":py:obj:`train `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Upgrade to Flower Next" +msgstr "Passe à Flower 1.0" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." +#: ../../source/how-to-upgrade-to-flower-next.rst:4 +msgid "" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." 
msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:9 msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:13 +msgid "Let's dive in!" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:48 #, fuzzy -msgid "Train the provided parameters using the locally held dataset." -msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +msgid "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" msgstr "" +"Voici comment mettre à jour une installation existante vers Flower 1.0 en" +" utilisant soit pip soit Poetry :" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:54 +msgid "or if you need Flower Next with simulation:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:61 msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"Ensure you set the following version constraint in your " +"``requirements.txt``" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." +#: ../../source/how-to-upgrade-to-flower-next.rst:71 +msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:82 +#, fuzzy +msgid "Using Poetry" +msgstr "Utiliser la poésie (recommandé)" + +#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#, fuzzy msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" +"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " +"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " +"poetry.lock`` avant d'exécuter ``poetry install``)." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#, fuzzy +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#: ../../source/how-to-upgrade-to-flower-next.rst:102 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. 
Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:109 #, fuzzy -msgid "Convert to object to Client type and return it." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +msgid "|clientapp_link|_" +msgstr "client" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-next.rst:110 +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +#: ../../source/how-to-upgrade-to-flower-next.rst:132 #, fuzzy -msgid "The current (global) model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +msgid "|serverapp_link|_" +msgstr "serveur" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-upgrade-to-flower-next.rst:133 msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of -msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +#: ../../source/how-to-upgrade-to-flower-next.rst:154 +msgid "Deployment" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-next.rst:155 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." 
+#: ../../source/how-to-upgrade-to-flower-next.rst:158 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-next.rst:174 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#, fuzzy +msgid "Simulation in CLI" +msgstr "Simulation de moniteur" + +#: ../../source/how-to-upgrade-to-flower-next.rst:202 msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: ../../source/how-to-upgrade-to-flower-next.rst:232 msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-upgrade-to-flower-next.rst:249 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:11 of -#, fuzzy -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" - -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." +#: ../../source/how-to-upgrade-to-flower-next.rst:275 +msgid "Simulation in a Notebook" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:276 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. 
Here's " +"an example:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +#: ../../source/how-to-upgrade-to-flower-next.rst:319 #, fuzzy -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" +"La plupart des `exemples de code Flower officiels " +"`_ sont déjà mis à " +"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " +"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " +"Flower `_ et utilise le canal " +"``#questions``." -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#, fuzzy +msgid "Important" +msgstr "Changements importants :" + +#: ../../source/how-to-upgrade-to-flower-next.rst:328 msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:2 -msgid "mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:334 +msgid "Happy migrating! 🚀" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of -#, fuzzy -msgid "Client-side adaptive clipping modifier." -msgstr "Logique côté client" +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-use-built-in-mods.rst:6 msgid "" -":py:obj:`fixedclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of -#, fuzzy -msgid "Client-side fixed clipping modifier." -msgstr "Logique côté client" +#: ../../source/how-to-use-built-in-mods.rst:9 +msgid "What are Mods?" 
+msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#, fuzzy -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/how-to-use-built-in-mods.rst:11 +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" +msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." +#: ../../source/how-to-use-built-in-mods.rst:18 +msgid "A typical mod function might look something like this:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" +#: ../../source/how-to-use-built-in-mods.rst:31 +msgid "Using Mods" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of -msgid "Handle incoming message and return results, following the SecAgg protocol." +#: ../../source/how-to-use-built-in-mods.rst:33 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "1. Import the required mods" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of -msgid "" -"Handle incoming message and return results, following the SecAgg+ " -"protocol." +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "First, import the built-in mod you intend to use:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-use-built-in-mods.rst:46 +msgid "2. Define your client function" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:48 msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\," -" ctxt\\, call\\_next\\)" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of -msgid "Message size mod." +#: ../../source/how-to-use-built-in-mods.rst:57 +msgid "3. Create the ``ClientApp`` with mods" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-use-built-in-mods.rst:59 msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +#: ../../source/how-to-use-built-in-mods.rst:72 #, fuzzy -msgid "Parameters size mod." -msgstr "Paramètres du modèle." +msgid "Order of execution" +msgstr "Dépréciations" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: ../../source/how-to-use-built-in-mods.rst:74 msgid "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," -" sensitivity\\, ...\\)" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#: flwr.client.mod.localdp_mod.LocalDpMod:1 of -#, fuzzy -msgid "Modifier for local differential privacy." 
-msgstr "Confidentialité différentielle" - -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 -msgid "LocalDpMod" +#: ../../source/how-to-use-built-in-mods.rst:76 +msgid "``example_mod_1`` (outermost mod)" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:3 of -msgid "" -"This mod clips the client model updates and adds noise to the params " -"before sending them to the server." +#: ../../source/how-to-use-built-in-mods.rst:77 +msgid "``example_mod_2`` (next mod)" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 -#: flwr.client.mod.localdp_mod.LocalDpMod:6 of -msgid "It operates on messages of type `MessageType.TRAIN`." +#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:8 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." +#: ../../source/how-to-use-built-in-mods.rst:79 +msgid "``example_mod_2`` (on the way back)" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:10 of -msgid "The sensitivity of the client model." +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +#: ../../source/how-to-use-built-in-mods.rst:82 msgid "" -"The privacy budget. Smaller value of epsilon indicates a higher level of " -"privacy protection." +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +#: ../../source/how-to-use-built-in-mods.rst:87 msgid "" -"The failure probability. The probability that the privacy mechanism fails" -" to provide the desired level of privacy. A smaller value of delta " -"indicates a stricter privacy guarantee." +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of -msgid "Create an instance of the local DP mod and add it to the client-side mods:" +#: ../../source/how-to-use-built-in-mods.rst:89 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" +#: ../../source/how-to-use-differential-privacy.rst:2 +#, fuzzy +msgid "Use Differential Privacy" +msgstr "Confidentialité différentielle" + +#: ../../source/how-to-use-differential-privacy.rst:3 +msgid "" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +#: ../../source/how-to-use-differential-privacy.rst:7 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " -"wrapper." +"Differential Privacy in Flower is in a preview phase. 
If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of -msgid "The wrapper sends the clipping_norm value to the client." +#: ../../source/how-to-use-differential-privacy.rst:12 +msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of -msgid "This mod clips the client model updates before sending them to the server." +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +#: ../../source/how-to-use-differential-privacy.rst:16 msgid "" -"It also sends KEY_NORM_BIT to the server for computing the new clipping " -"value." +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of +#: ../../source/how-to-use-differential-privacy.rst:21 #, fuzzy -msgid "Notes" -msgstr "Aucun" +msgid "Server-side Clipping" +msgstr "Logique côté serveur" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of -msgid "Consider the order of mods when using multiple." -msgstr "" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of -msgid "Typically, adaptiveclipping_mod should be the last to operate on params." -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 -msgid "fixedclipping\\_mod" -msgstr "" - -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +#: ../../source/how-to-use-differential-privacy.rst:22 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." -msgstr "" - -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" -msgstr "" - -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." 
+"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." msgstr "" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid "parameters\\_size\\_mod" -msgstr "Paramètres du modèle." +msgid "server side clipping" +msgstr "Logique côté serveur" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +#: ../../source/how-to-use-differential-privacy.rst:31 msgid "" -"This mod logs the number of parameters transmitted in the message as well" -" as their size in bytes." -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:52 #, fuzzy -msgid "secaggplus\\_mod" -msgstr "Flux de travail" +msgid "Client-side Clipping" +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." msgstr "" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -#, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" - -#: ../../source/ref-api/flwr.client.start_client.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid "start\\_client" -msgstr "start_client" +msgid "client side clipping" +msgstr "Logique côté client" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-use-differential-privacy.rst:63 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." -msgstr "" - -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. 
(default: None)" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" msgstr "" -#: flwr.client.app.start_client:9 of +#: ../../source/how-to-use-differential-privacy.rst:80 msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/how-to-use-differential-privacy.rst:97 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of -msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of -msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: ../../source/how-to-use-differential-privacy.rst:122 msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." msgstr "" -#: flwr.client.app.start_client:31 of -msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" msgstr "" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-use-differential-privacy.rst:126 msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." 
-msgstr "" - -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" -msgstr "" - -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "" - -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +#: ../../source/how-to-use-strategies.rst:2 #, fuzzy -msgid "start\\_numpy\\_client" -msgstr "start_numpy_client" +msgid "Use strategies" +msgstr "Stratégies personnalisées" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-use-strategies.rst:4 msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." msgstr "" +"Flower permet une personnalisation complète du processus d'apprentissage " +"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " +"intégrées sont fournies dans le cadre principal." -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" msgstr "" +"Il y a trois façons de personnaliser la manière dont Flower orchestre le " +"processus d'apprentissage du côté du serveur :" -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "commun" - -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -msgstr "" +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" +msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -#, fuzzy -msgid "Create Array from NumPy ndarray." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" +msgstr "Personnalise une stratégie existante avec des fonctions de rappel" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -msgstr "" +#: ../../source/how-to-use-strategies.rst:10 +#: ../../source/how-to-use-strategies.rst:87 +msgid "Implement a novel strategy" +msgstr "Mets en place une nouvelle stratégie" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." -msgstr "Désérialise le tableau numérique NumPy à partir d'octets." 
+#: ../../source/how-to-use-strategies.rst:14 +msgid "Use an existing strategy" +msgstr "Utilise une stratégie existante" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-strategies.rst:16 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" msgstr "" +"Flower intègre un certain nombre de stratégies d'apprentissage fédéré " +"populaires. Une stratégie intégrée peut être instanciée comme suit :" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: ../../source/how-to-use-strategies.rst:25 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the :code:`start_server` function. It is usually " +"recommended to adjust a few parameters during instantiation:" msgstr "" -"Configure la journalisation vers un fichier et/ou un serveur de " -"journalisation distant." +"Cela crée une stratégie dont tous les paramètres sont laissés à leur " +"valeur par défaut et la transmet à la fonction :code:`start_server`. Il " +"est généralement recommandé d'ajuster quelques paramètres lors de " +"l'instanciation :" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-strategies.rst:42 msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." msgstr "" +"Les stratégies existantes offrent plusieurs façons de personnaliser leur " +"comportement. Les fonctions de rappel permettent aux stratégies d'appeler" +" le code fourni par l'utilisateur pendant l'exécution." -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-strategies.rst:45 +msgid "Configuring client fit and client evaluate" +msgstr "Configurer l'adaptation et l'évaluation du client" + +#: ../../source/how-to-use-strategies.rst:47 msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"The server can pass new configuration values to the client each round by " +"providing a function to :code:`on_fit_config_fn`. The provided function " +"will be called by the strategy and must return a dictionary of " +"configuration key values pairs that will be sent to the client. It must " +"return a dictionary of arbitrary configuration values :code:`client.fit`" +" and :code:`client.evaluate` functions during each round of federated " +"learning." msgstr "" +"Le serveur peut transmettre de nouvelles valeurs de configuration au " +"client à chaque tour en fournissant une fonction à " +":code:`on_fit_config_fn`. La fonction fournie sera appelée par la " +"stratégie et doit renvoyer un dictionnaire de paires de valeurs de clés " +"de configuration qui seront envoyées au client. Elle doit renvoyer un " +"dictionnaire de valeurs de configuration arbitraires :code:`client.fit` " +"et :code:`client.evaluate` au cours de chaque tour d'apprentissage " +"fédéré." -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." 
-msgstr "Enregistre 'msg % args' avec le niveau de sévérité entier 'level'." - -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/how-to-use-strategies.rst:75 +#, fuzzy +msgid "" +"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the :code:`on_fit_config_fn` in its own " +":code:`client.fit()` function." msgstr "" +"Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de " +"configuration arbitraires du serveur au client, et changer poétiquement " +"ces valeurs à chaque tour, par exemple pour ajuster le taux " +"d'apprentissage. Le client recevra le dictionnaire renvoyé par le " +":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "Sérialise le tableau numérique NumPy en octets." - -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:78 +msgid "" +"Similar to :code:`on_fit_config_fn`, there is also " +":code:`on_evaluate_config_fn` to customize the configuration sent to " +":code:`client.evaluate()`" msgstr "" +"Comme pour :code:`on_fit_config_fn`, il existe aussi " +":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée" +" à :code:`client.evaluate()`" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." -msgstr "" -"Construit une date à partir de time.time() avec le fuseau horaire réglé " -"sur UTC." +#: ../../source/how-to-use-strategies.rst:81 +msgid "Configuring server-side evaluation" +msgstr "Configuration de l'évaluation côté serveur" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-strategies.rst:83 msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Server-side evaluation can be enabled by passing an evaluation function " +"to :code:`evaluate_fn`." msgstr "" +"L'évaluation côté serveur peut être activée en passant une fonction " +"d'évaluation à :code:`evaluate_fn`." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "Convertit les ndarrays NumPy en objets de paramètres." - -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-strategies.rst:89 +#, fuzzy msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" +"L'écriture d'une stratégie entièrement personnalisée est un peu plus " +"complexe, mais c'est celle qui offre le plus de souplesse. Lis le guide " +"`Implémentation des stratégies `_ pour " +"en savoir plus." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." 
+#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "Tutoriel" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" -msgstr "" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "Quickstart tutorials" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." -msgstr "" +#: ../../source/index.rst:75 ../../source/index.rst:79 +msgid "How-to guides" +msgstr "Guides" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +#: ../../source/index.rst:100 +msgid "Legacy example guides" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." -msgstr "" -"ClientMessage est un conteneur utilisé pour contenir un message de " -"résultat." +#: ../../source/index.rst:108 ../../source/index.rst:112 +msgid "Explanations" +msgstr "Explications" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" -msgstr "" +#: None:-1 +msgid "API reference" +msgstr "Référence pour l'API" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "Codes d'état du client." +#: ../../source/index.rst:138 +msgid "Reference docs" +msgstr "Référence pour la documentation" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:154 #, fuzzy -msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" -msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +msgid "Contributor tutorials" +msgstr "Configuration du contributeur" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/index.rst:161 #, fuzzy -msgid "Configs record." -msgstr "Configurer les clients" +msgid "Contributor how-to guides" +msgstr "Guide pour les contributeurs" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr "" +#: ../../source/index.rst:173 +#, fuzzy +msgid "Contributor explanations" +msgstr "Explications" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." -msgstr "" +#: ../../source/index.rst:179 +#, fuzzy +msgid "Contributor references" +msgstr "Configuration du contributeur" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "Message DisconnectRes envoyé par le client au serveur." +#: ../../source/index.rst:2 +#, fuzzy +msgid "Flower Framework Documentation" +msgstr "Rédiger de la documentation" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:7 msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." msgstr "" +"Bienvenue sur la documentation de Flower. `Flower `_ " +"est un framework de federated learning convivial et facile à utiliser." 
-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.EvaluateIns:1 of
-msgid "Evaluate instructions for a client."
-msgstr "Évaluer les instructions pour un client."
+#: ../../source/index.rst:11
+msgid "Join the Flower Community"
+msgstr "Rejoignez la communauté de Flower"

-#: ../../source/ref-api/flwr.common.rst:64::1
+#: ../../source/index.rst:13
 msgid ""
-":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, "
-"num\\_examples\\, metrics\\)"
+"The Flower Community is growing quickly - we're a friendly group of "
+"researchers, engineers, students, professionals, academics, and other "
+"enthusiasts."
 msgstr ""
+"La communauté de Flower s'agrandit rapidement - on est un super groupe de"
+" chercheurs, ingénieurs, étudiants, professionnels, académiques, et "
+"autres passionnés."

-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.EvaluateRes:1 of
-msgid "Evaluate response from a client."
-msgstr "Évaluer la réponse d'un client."
-
-#: ../../source/ref-api/flwr.common.rst:64::1
-msgid ":py:obj:`EventType `\\ \\(value\\)"
-msgstr ""
+#: ../../source/index.rst:15
+msgid "Join us on Slack"
+msgstr "Join us on Slack"

-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.telemetry.EventType:1 of
-msgid "Types of telemetry events."
-msgstr "Types d'événements télémétriques."
+#: ../../source/index.rst:23
+msgid "Flower Framework"
+msgstr "Flower Framework"

-#: ../../source/ref-api/flwr.common.rst:64::1
-msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)"
+#: ../../source/index.rst:25
+msgid ""
+"The user guide is targeted at researchers and developers who want to use "
+"Flower to bring existing machine learning workloads into a federated "
+"setting. One of Flower's design goals was to make this simple. Read on to"
+" learn more."
 msgstr ""
+"Ce guide utilisateur s'adresse à des chercheurs et des développeurs qui "
+"veulent utiliser Flower pour transposer des workloads de Machine Learning"
+" existantes dans un scénario fédéré. Un des buts de Flower est de rendre "
+"cela le plus simple possible. Lisez la suite pour en apprendre plus."

-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.FitIns:1 of
-msgid "Fit instructions for a client."
-msgstr "Instructions d'ajustement pour un client."
+#: ../../source/index.rst:30
+msgid "Tutorials"
+msgstr "Tutoriels"

-#: ../../source/ref-api/flwr.common.rst:64::1
+#: ../../source/index.rst:32
 msgid ""
-":py:obj:`FitRes `\\ \\(status\\, parameters\\, "
-"num\\_examples\\, metrics\\)"
+"A learning-oriented series of federated learning tutorials, the best "
+"place to start."
 msgstr ""
+"Une série de tutoriels de Federated Learning, l'endroit parfait pour "
+"débuter."

-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.FitRes:1 of
-msgid "Fit response from a client."
-msgstr "Réponse adaptée d'un client."
+#: ../../source/index.rst:62
+#, fuzzy
+msgid ""
+"QUICKSTART TUTORIALS: :doc:`PyTorch ` | "
+":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas "
+"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | "
+":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `"
 msgstr ""
 "QUICKSTART TUTORIALS: :ref:`PyTorch ` | "
 ":ref:`TensorFlow ` | :ref:`🤗 Transformers "
 "` | :ref:`JAX ` | :ref:`Pandas "
 "` | :ref:`fastai ` | :ref:`PyTorch "
 "Lightning ` | :ref:`MXNet ` | :ref:`scikit-learn ` | :ref:`XGBoost "
 "` | :ref:`Android ` | :ref:`iOS "
 "`"

-#: ../../source/ref-api/flwr.common.rst:64::1
-msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)"
+#: ../../source/index.rst:64
+msgid "We also made video tutorials for PyTorch:"
 msgstr ""

-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.message.Error:1 of
-msgid "A dataclass that stores information about an error that occurred."
-msgstr ""
-
-#: ../../source/ref-api/flwr.common.rst:64::1
-msgid ":py:obj:`GetParametersIns `\\ \\(config\\)"
-msgstr ""
-
-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.GetParametersIns:1 of
-msgid "Parameters request for a client."
-msgstr "Demande de paramètres pour un client."
-
-#: ../../source/ref-api/flwr.common.rst:64::1
-msgid ""
-":py:obj:`GetParametersRes `\\ \\(status\\, "
-"parameters\\)"
-msgstr ""
-
-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.GetParametersRes:1 of
-msgid "Response when asked to return parameters."
-msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres."
-
-#: ../../source/ref-api/flwr.common.rst:64::1
-msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)"
-msgstr ""
-
-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.GetPropertiesIns:1 of
-msgid "Properties request for a client."
-msgstr "Demande de propriétés pour un client."
+#: ../../source/index.rst:69
+#, fuzzy
+msgid "And TensorFlow:"
+msgstr "Exemples de TensorFlow"

-#: ../../source/ref-api/flwr.common.rst:64::1
+#: ../../source/index.rst:77
 msgid ""
-":py:obj:`GetPropertiesRes `\\ \\(status\\, "
-"properties\\)"
+"Problem-oriented how-to guides show step-by-step how to achieve a "
+"specific goal."
 msgstr ""
+"Guides orientés sur la résolution, étape par étape, de problèmes ou "
+"d'objectifs spécifiques."

-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.typing.GetPropertiesRes:1 of
-msgid "Properties response from a client."
-msgstr "Réponse des propriétés d'un client."
-
-#: ../../source/ref-api/flwr.common.rst:64::1
+#: ../../source/index.rst:110
 msgid ""
-":py:obj:`Message `\\ \\(metadata\\[\\, content\\, "
-"error\\]\\)"
+"Understanding-oriented concept guides explain and discuss key topics and "
+"underlying ideas behind Flower and collaborative AI."
 msgstr ""
+"Guides orientés sur la compréhension et l'explication des sujets et idées"
+" de fond sur lesquels sont construits Flower et l'IA collaborative."

-#: ../../source/ref-api/flwr.common.rst:64::1
-#: flwr.common.message.Message:1 of
-msgid "State of your application from the viewpoint of the entity using it."
-msgstr ""

-#: ../../source/ref-api/flwr.common.rst:64::1
-msgid ":py:obj:`MessageType `\\ \\(\\)"
-msgstr ""
+#: ../../source/index.rst:121
+#, fuzzy
+msgid "References"
+msgstr "Référence"

+#: ../../source/index.rst:123
+msgid "Information-oriented API reference and other reference material."
+msgstr "Référence de l'API et autres documents de référence, orientés sur l'information."
-#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." +#: ../../source/index.rst:132::1 +msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +#: ../../source/index.rst:132::1 flwr:1 of +msgid "Flower main package." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." -msgstr "" +#: ../../source/index.rst:149 +#, fuzzy +msgid "Contributor docs" +msgstr "Configuration du contributeur" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:151 +#, fuzzy msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." msgstr "" +"Les auteurs de Flower sont heureux d'accueillir des contributions " +"externes. Les guides suivant sont là pour vous accompagner dans cette " +"direction." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." -msgstr "" +#: ../../source/ref-api-cli.rst:2 +#, fuzzy +msgid "Flower CLI reference" +msgstr "Client de Flower" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -msgstr "" +#: ../../source/ref-api-cli.rst:7 +#, fuzzy +msgid "flwr CLI" +msgstr "Client de Flower" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." +#: ../../flwr:1 +msgid "flwr is the Flower command line interface." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" -msgstr "" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Options" +msgstr "Solution" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +#: ../../flwr:1 +msgid "Install completion for the current shell." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../flwr:1 msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." -msgstr "Paramètres du modèle." +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../flwr build:1 msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -#, fuzzy -msgid "Parameters record." -msgstr "Paramètres du modèle." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "Message de reconnexion du serveur au client." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." -msgstr "" +#: ../../flwr install:1 +#, fuzzy +msgid "Install a Flower App Bundle." +msgstr "Installer Flower" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" msgstr "" -"ServerMessage est un conteneur utilisé pour contenir un message " -"d'instruction." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "Statut du client." - -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" msgstr "" -#: flwr.common.record.parametersrecord.Array:3 of +#: ../../flwr install:1 msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" msgstr "" -#: flwr.common.record.parametersrecord.Array:6 of -msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of -msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" msgstr "" -#: flwr.common.record.parametersrecord.Array:12 of -msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" msgstr "" -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." +#: ../../flwr install:1 +msgid "The desired install path." 
msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: ../../source/ref-api-cli.rst #, fuzzy -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Arguments" +msgstr "Amélioration de la documentation" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy -msgid "Return the array as a NumPy array." -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" +msgid "Optional argument" +msgstr "Améliorations facultatives" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -#, fuzzy -msgid ":py:obj:`shape `\\" -msgstr "serveur.stratégie.Stratégie" +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../flwr log #, fuzzy -msgid ":py:obj:`stype `\\" -msgstr "serveur.stratégie.Stratégie" +msgid "default" +msgstr "Flux de travail" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" +#: ../../flwr log:1 +msgid "``True``" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +#: ../../flwr log:1 #, fuzzy -msgid "ClientMessage" -msgstr "Côté client" +msgid "Required argument" +msgstr "Amélioration de la documentation" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" -msgstr "" +#: ../../flwr new:1 +#, fuzzy +msgid "Create new Flower App." +msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" +#: ../../flwr new:1 +msgid "The ML framework to use" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid "" -":py:obj:`get_parameters_res " -"`\\" -msgstr "" +#: ../../flwr new +#, fuzzy +msgid "options" +msgstr "Solution" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../flwr new:1 msgid "" -":py:obj:`get_properties_res " -"`\\" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" +#: ../../flwr new:1 +msgid "The Flower username of the author" msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" -msgstr "" +#: ../../flwr run:1 +#, fuzzy +msgid "Run Flower App." +msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../flwr run:1 msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../flwr run:1 msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." 
msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -msgstr "" +#: ../../source/ref-api-cli.rst:16 +#, fuzzy +msgid "flower-simulation" +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" -msgstr "" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#: ../../source/ref-api-cli.rst:36 #, fuzzy -msgid "ConfigsRecord" -msgstr "Configurer les clients" +msgid "flower-supernode" +msgstr "Serveur de Flower" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/ref-api-cli.rst:46 +#, fuzzy +msgid "flower-server-app" +msgstr "flower-driver-api" + +#: ../../source/ref-api-cli.rst:49 msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " +"longer supports passing a reference to a `ServerApp` attribute. Instead, " +"you need to pass the path to Flower app via the argument :code:`--app`. " +"This is the path to a directory containing a `pyproject.toml`. You can " +"create a valid Flower app by executing :code:`flwr new` and following the" +" prompt." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api-cli.rst:62 +#, fuzzy +msgid "flower-superexec" +msgstr "flower-superlink" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." -msgstr "" +#: ../../source/ref-api/flwr.rst:2 +#, fuzzy +msgid "flwr" +msgstr "Fleur" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.client `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +#, fuzzy +msgid "Flower client." +msgstr "Client de Flower" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.common `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." 
+msgstr "Composants communs partagés entre le serveur et le client." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.server `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +#, fuzzy +msgid "Flower server." +msgstr "Serveur de Flower" + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.simulation `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +#, fuzzy +msgid "Flower simulation." +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "client" + +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +#, fuzzy +msgid "Functions" +msgstr "Les quatre fonctions :" + +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.client.rst:23::1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." msgstr "" -#: flwr.common.context.Context:3 of +#: ../../source/ref-api/flwr.client.rst:32::1 msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. 
It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of #, fuzzy -msgid ":py:obj:`state `\\" -msgstr "serveur.stratégie.Stratégie" +msgid "Flower ClientApp." +msgstr "Flower ClientApp." -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." msgstr "" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" -msgstr "" +#: ../../source/ref-api/flwr.client.rst:50::1 +#, fuzzy +msgid ":py:obj:`flwr.client.mod `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +#, fuzzy +msgid "Flower Built-in Mods." +msgstr "Client de Flower" + +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" msgstr "" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: 
../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" msgstr "" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +#, fuzzy +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." 
+#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of #, fuzzy -msgid "EvaluateIns" -msgstr "Explications" +msgid "Get the run context from this client." +msgstr "Évaluer la réponse d'un client." -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +#, fuzzy +msgid "Return the current local model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." 
msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr "" + +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of #, fuzzy +msgid "Parameters" +msgstr "Paramètres du modèle." + +#: flwr.client.client.Client.evaluate:3 of msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." 
msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." -msgstr "" +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +#, fuzzy +msgid "Returns" +msgstr "Ressources" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.evaluate:8 of msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." 
+#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: flwr.client.client.Client.fit:3 of msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr "serveur.stratégie.Stratégie" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +#: flwr.client.client.Client.fit:8 of msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.get_parameters:3 of msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.get_parameters:7 of #, fuzzy -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "The current local model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 #, fuzzy -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "ClientApp" +msgstr "client" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of #, fuzzy -msgid ":py:obj:`title `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." -msgstr "" +msgid "Examples" +msgstr "Exemples de PyTorch" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:5 of msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." 
+"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:16 of msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:21 of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgid ":py:obj:`train `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." 
+":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`lower `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of #, fuzzy -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Train the provided parameters using the locally held dataset." +msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`get_parameters `\\ " +"\\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." +":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +":py:obj:`set_context `\\ " +"\\(context\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of #, fuzzy -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Convert to object to Client type and return it." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." 
+#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of #, fuzzy -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "The current (global) model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "Return a copy of the string with leading and trailing whitespace removed." +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." 
+#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." -msgstr "" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +#, fuzzy +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of #, fuzzy -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." 
+msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of #, fuzzy -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Client-side adaptive clipping modifier." +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of #, fuzzy -msgid ":py:obj:`isdecimal `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." -msgstr "" +msgid "Client-side fixed clipping modifier." +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`isdigit `\\ \\(\\)" +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." 
+#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isnumeric `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isalpha `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of #, fuzzy -msgid ":py:obj:`isidentifier `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Parameters size mod." +msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of #, fuzzy -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Modifier for local differential privacy." +msgstr "Confidentialité différentielle" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`maketrans `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +msgid "" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_ENTER `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_ENTER `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_LEAVE `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" -msgstr "" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +#, fuzzy +msgid "Notes" +msgstr "Aucun" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_ENTER `\\" +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_LEAVE `\\" +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" -msgstr "" +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +#, fuzzy +msgid "parameters\\_size\\_mod" +msgstr "Paramètres du modèle." 
-#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 #, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" -msgstr "serveur.stratégie.Stratégie" +msgid "secaggplus\\_mod" +msgstr "Flux de travail" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.start_client.rst:2 #, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" -msgstr "serveur.stratégie.Stratégie" +msgid "start\\_client" +msgstr "start_client" -#: flwr.common.EventType.capitalize:3 of +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." msgstr "" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: flwr.common.EventType.count:1 of +#: flwr.client.app.start_client:9 of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" msgstr "" -#: flwr.common.EventType.encode:3 of -msgid "encoding" +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.common.EventType.encode:6 of +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. 
Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.common.EventType.endswith:1 of +#: flwr.client.app.start_client:31 of msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." msgstr "" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.common.EventType.format:1 of -msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" -#: flwr.common.EventType.format_map:1 of +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +#, fuzzy +msgid "start\\_numpy\\_client" +msgstr "start_numpy_client" + +#: flwr.client.app.start_numpy_client:5 of msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." msgstr "" -#: flwr.common.EventType.isalnum:3 of -msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." 
+#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "commun" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.EventType.isalpha:3 of -msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#, fuzzy +msgid "Create Array from NumPy ndarray." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: flwr.common.EventType.isascii:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "Désérialise le tableau numérique NumPy à partir d'octets." + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -#: flwr.common.EventType.isdecimal:3 of -msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." msgstr "" +"Configure la journalisation vers un fichier et/ou un serveur de " +"journalisation distant." -#: flwr.common.EventType.isdigit:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -#: flwr.common.EventType.isidentifier:3 of -msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: flwr.common.EventType.islower:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.common.EventType.isnumeric:3 of -msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "Enregistre 'msg % args' avec le niveau de sévérité entier 'level'." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.EventType.isprintable:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "Sérialise le tableau numérique NumPy en octets." + +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." 
+":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.common.EventType.isspace:3 of -msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "Convertit les ndarrays NumPy en objets de paramètres." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.istitle:3 of -msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "" +"Construit une date à partir de time.time() avec le fuseau horaire réglé " +"sur UTC." -#: flwr.common.EventType.isupper:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -#: flwr.common.EventType.join:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" msgstr "" -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: flwr.common.EventType.maketrans:3 of -msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." msgstr "" +"ClientMessage est un conteneur utilisé pour contenir un message de " +"résultat." -#: flwr.common.EventType.partition:3 of -msgid "" -"This will search for the separator in the string. 
If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: flwr.common.EventType.partition:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "Codes d'état du client." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" msgstr "" -#: flwr.common.EventType.removeprefix:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.common.EventType.removesuffix:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "Configs record." +msgstr "Configurer les clients" + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" msgstr "" -#: flwr.common.EventType.replace:5 of -msgid "count" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +msgid "Context of your run." msgstr "" -#: flwr.common.EventType.replace:4 of -msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: flwr.common.EventType.replace:7 of -msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "Message DisconnectRes envoyé par le client au serveur." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." msgstr "" -#: flwr.common.EventType.rpartition:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." 
+":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.EventType.rpartition:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "Évaluer les instructions pour un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "Évaluer la réponse d'un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "Types d'événements télémétriques." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "Instructions d'ajustement pour un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "Réponse adaptée d'un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "Demande de paramètres pour un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: flwr.common.EventType.split:13 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "Demande de propriétés pour un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. 
With natural text that includes punctuation, consider using " -"the regular expression module." +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" msgstr "" -#: flwr.common.EventType.splitlines:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "Réponse des propriétés d'un client." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" msgstr "" -#: flwr.common.EventType.startswith:1 of -msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." msgstr "" -#: flwr.common.EventType.title:3 of -msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.translate:5 of -#, fuzzy -msgid "table" -msgstr "Database" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "" -#: flwr.common.EventType.translate:4 of -msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.translate:7 of -msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." msgstr "" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Metrics `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy +msgid "Metrics recod." +msgstr "Paramètres du modèle." 
-#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`NDArrays `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -#, fuzzy -msgid "GetParametersIns" -msgstr ":code:`get_parameters`" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy -msgid "GetParametersRes" -msgstr ":code:`get_parameters`" +msgid "Parameters record." +msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Properties `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "Message de reconnexion du serveur au client." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." 
msgstr "" +"ServerMessage est un conteneur utilisé pour contenir un message " +"d'instruction." -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 -#, fuzzy -msgid "Message" -msgstr "Côté serveur" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." +msgstr "Statut du client." -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" msgstr "" -#: flwr.common.message.Message:5 of +#: flwr.common.record.parametersrecord.Array:3 of msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." msgstr "" -#: flwr.common.message.Message:8 of +#: flwr.common.record.parametersrecord.Array:6 of msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.record.parametersrecord.Array:8 of msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.record.parametersrecord.Array:12 of msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." -msgstr "" +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#, fuzzy +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +#, fuzzy +msgid "Return the array as a NumPy array." 
+msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" -msgstr "" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`shape `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "The content of this message." -msgstr "Évaluer la réponse d'un client." +msgid ":py:obj:`stype `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." -msgstr "" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +#, fuzzy +msgid "ClientMessage" +msgstr "Côté client" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +":py:obj:`get_parameters_res " +"`\\" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" -msgstr "" - -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +":py:obj:`get_properties_res " +"`\\" msgstr "" -#: flwr.common.message.Message.create_reply:3 of -msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" msgstr "" -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." 
+#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" -msgstr "" +#: ../../source/ref-api/flwr.common.Config.rst:2 +#, fuzzy +msgid "Config" +msgstr "Configurer les clients" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PARAMETERS `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#, fuzzy +msgid "ConfigsRecord" +msgstr "Configurer les clients" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PROPERTIES `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." 
+#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" msgstr "" -#: flwr.common.message.Metadata:13 of +#: flwr.common.record.configsrecord.ConfigsRecord:42 of msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.common.message.Metadata:21 of -msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`created_at `\\" -msgstr "serveur.stratégie.Stratégie" - -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of -msgid "Time-to-live for this message." +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" +#: flwr.common.context.Context:3 of +msgid "The ID that identifies the node." msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: flwr.common.context.Context:5 of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: flwr.common.context.Context:8 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. 
across " +"multiple rounds)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy -msgid "ParametersRecord" -msgstr "Paramètres du modèle." +msgid "EvaluateIns" +msgstr "Explications" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of -msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." 
+#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" +msgstr "" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -#, fuzzy -msgid "ReconnectIns" -msgstr "Collecte centralisée des données" - -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." msgstr "" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`parameters_records `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "ServerMessage" -msgstr "Côté serveur" +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`title `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid "" -":py:obj:`get_parameters_ins " -"`\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`get_properties_ins " -"`\\" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:2 -#, fuzzy -msgid "Status" -msgstr "Statut du client." - -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." 
msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -#, fuzzy -msgid "configure" -msgstr "Configurer les clients" - -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: logging.Logger.log:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." msgstr "" -"Pour transmettre des informations sur les exceptions, utilise l'argument " -"mot-clé exc_info avec une valeur vraie, par ex." -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" -msgstr "serveur" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Run Flower server app." 
-msgstr "Serveur de Flower" +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of -#, fuzzy -msgid "Run Flower SuperLink (Driver API and Fleet API)." -msgstr "flower-fleet-api" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`Driver `\\ \\(\\)" +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." -msgstr "" - -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." 
-msgstr "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of -#, fuzzy -msgid "Flower ServerApp." -msgstr "Serveur de Flower" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." +msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" -msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" - -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of -#, fuzzy -msgid "Flower server config." -msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" -msgstr "" +msgid ":py:obj:`swapcase `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of +msgid "" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`flwr.server.strategy `\\" +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`flwr.server.workflow `\\" +msgid ":py:obj:`upper `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of -#, fuzzy -msgid "Workflows." -msgstr "Flux de travail" - -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -#, fuzzy -msgid "ClientManager" -msgstr "client" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." 
+msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`num_available `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`unregister `\\ \\(client\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." 
msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of -msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." msgstr "" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Driver" -msgstr "serveur" +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -#, fuzzy -msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." 
-msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" -msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of -msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of -msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of -msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`maketrans `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of -msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of -msgid "An iterable of message IDs for which reply messages are to be retrieved." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of -msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." +":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." -msgstr "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "**replies** -- An iterable of reply messages received from the SuperLink." -msgstr "" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." -msgstr "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" -msgstr "" +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "Évaluation centralisée" +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Add metrics entries (from centralized evaluation)." -msgstr "Évaluation centralisée" +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`config `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`strategy `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: flwr.common.EventType.count:1 of +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`history `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`state `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" -msgstr "Serveur" +#: flwr.common.EventType.encode:3 of +msgid "encoding" +msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." 
msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." +#: flwr.common.EventType.encode:9 of +msgid "errors" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.encode:6 of msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.endswith:1 of msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.format:1 of msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" -msgstr "" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of -#, fuzzy -msgid "Replace server strategy." 
-msgstr "stratégie.du.serveur" - -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -#, fuzzy -msgid "ServerApp" -msgstr "serveur" - -#: flwr.server.server_app.ServerApp:5 of -#, fuzzy -msgid "Use the `ServerApp` with an existing `Strategy`:" -msgstr "Utilise une stratégie existante" - -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of -#, fuzzy -msgid ":py:obj:`main `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: flwr.common.EventType.isalnum:3 of +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -#, fuzzy -msgid "ServerConfig" -msgstr "serveur" - -#: flwr.server.server_config.ServerConfig:3 of +#: flwr.common.EventType.isalpha:3 of msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: flwr.common.EventType.isdecimal:3 of +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: flwr.common.EventType.isdigit:3 of +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: flwr.common.EventType.isidentifier:3 of +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isnumeric:3 of msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isprintable:3 of msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isspace:3 of msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.istitle:3 of msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isupper:3 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: flwr.common.EventType.join:3 of msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: flwr.common.EventType.maketrans:3 of +msgid "" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -#, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" - -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: flwr.common.EventType.partition:3 of +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: flwr.common.EventType.partition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" +#: flwr.common.EventType.removeprefix:3 of +msgid "" +"If the string starts with the prefix string, return string[len(prefix):]." 
+" Otherwise, return a copy of the original string." +msgstr "" -#: ../../source/ref-api/flwr.server.start_server.rst:2 -#, fuzzy -msgid "start\\_server" -msgstr "serveur.start_server" +#: flwr.common.EventType.removesuffix:3 of +msgid "" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." +msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: flwr.common.EventType.replace:5 of +msgid "count" msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.common.EventType.replace:4 of msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: flwr.common.EventType.replace:7 of msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +"If the optional argument count is given, only the first count occurrences" +" are replaced." msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.common.EventType.rpartition:3 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.common.EventType.rpartition:7 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." msgstr "" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" msgstr "" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. 
The tuple is expected to have three " -"bytes elements in the following order:" +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." msgstr "" -#: flwr.server.app.start_server:32 of -#, fuzzy -msgid "CA certificate." -msgstr "Certificats" +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." +msgstr "" -#: flwr.server.app.start_server:33 of -#, fuzzy -msgid "server certificate." -msgstr "Certificats" +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" +msgstr "" -#: flwr.server.app.start_server:34 of -#, fuzzy -msgid "server private key." -msgstr "stratégie.du.serveur" +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." +msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." msgstr "" -#: flwr.server.app.start_server:42 of -#, fuzzy -msgid "Starting an insecure server:" -msgstr "Démarrer le serveur" +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "" -#: flwr.server.app.start_server:46 of -#, fuzzy -msgid "Starting an SSL-enabled server:" -msgstr "Démarrer le serveur" +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -#, fuzzy -msgid "strategy" -msgstr "stratégie.du.serveur" +#: flwr.common.EventType.startswith:1 of +msgid "" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.title:3 of msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of +#: flwr.common.EventType.translate:5 of #, fuzzy -msgid "Bulyan strategy." -msgstr "Stratégies intégrées" +msgid "table" +msgstr "Database" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.translate:4 of msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.translate:7 of msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#, fuzzy +msgid "GetParametersIns" +msgstr ":code:`get_parameters`" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 #, fuzzy -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
+msgid "GetParametersRes" +msgstr ":code:`get_parameters`" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -#, fuzzy -msgid "Federated Averaging strategy." -msgstr "Stratégie de moyenne fédérée." +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of +#: ../../source/ref-api/flwr.common.Message.rst:2 #, fuzzy -msgid "Federated Averaging with Momentum strategy." -msgstr "Stratégie de moyenne fédérée." +msgid "Message" +msgstr "Côté serveur" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -#, fuzzy -msgid "Configurable FedMedian strategy implementation." -msgstr "Configuration de l'évaluation fédérée" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message:5 of msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -#, fuzzy -msgid "Federated Optim strategy." -msgstr "Stratégie de moyenne fédérée." +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." 
+msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -#, fuzzy -msgid "Federated Optimization strategy." -msgstr "Stratégie de moyenne fédérée." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +#, fuzzy +msgid "The content of this message." +msgstr "Évaluer la réponse d'un client." + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." 
+#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message.create_reply:3 of msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#, fuzzy -msgid ":py:obj:`Strategy `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." 
+#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "Initial global model parameters." -msgstr "Initialise le modèle global" +msgid ":py:obj:`created_at `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.bulyan.Bulyan:27 of -msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Aggregate evaluation losses using weighted average." -msgstr "Résultats globaux de l'évaluation." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." 
+msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Aggregate fit results using Bulyan." -msgstr "Résultats globaux de l'évaluation." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -#, fuzzy -msgid "Configure the next round of evaluation." 
-msgstr "Configuration de l'évaluation côté serveur" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +msgid "Time-to-live for this message." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." 
+#: ../../source/ref-api/flwr.common.Metrics.rst:2 +#, fuzzy +msgid "Metrics" +msgstr "Suivi des mesures" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Initialize global model parameters." -msgstr "Initialise le modèle global" +#: flwr.common.record.metricsrecord.MetricsRecord:3 of +msgid "" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: flwr.common.record.metricsrecord.MetricsRecord:12 of +msgid "" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. 
A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +#: flwr.common.record.metricsrecord.MetricsRecord:28 of +msgid "" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" +#: flwr.common.record.metricsrecord.MetricsRecord:39 of +msgid "" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." +msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: flwr.common.record.metricsrecord.MetricsRecord:50 of +msgid "" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Aggregate evaluation losses using the given strategy." -msgstr "Résultats globaux de l'évaluation." +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." 
-msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." 
+#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +msgid "NDArrays" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -#, fuzzy -msgid "The current round of federated learning." -msgstr "Qu'est-ce que l'apprentissage fédéré ?" - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of -msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" -msgstr "" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "Paramètres du modèle." -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. 
A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of -msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +msgid "Let's see some examples:" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +"Note that different arrays (e.g. 
from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of -msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." -msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of -msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." 
-msgstr "" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" -msgstr "" +#: ../../source/ref-api/flwr.common.Properties.rst:2 +#, fuzzy +msgid "Properties" +msgstr "Prérequis" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy -msgid "Create a strategy:" -msgstr "stratégie.du.serveur" +msgid "ReconnectIns" +msgstr "Collecte centralisée des données" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of -msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -#, fuzzy -msgid "Aggregate training results and update clip norms." -msgstr "Résultats globaux de l'évaluation." - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: flwr.common.record.recordset.RecordSet:29 of +msgid "Let's see an example." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -#, fuzzy -msgid "DifferentialPrivacyClientSideFixedClipping" -msgstr "Confidentialité différentielle" - -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: flwr.common.record.recordset.RecordSet:47 of +msgid "" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of -msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of -msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy -msgid "Add noise to the aggregated parameters." -msgstr "Puis sérialise le résultat agrégé :" +msgid "ServerMessage" +msgstr "Côté serveur" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" -msgstr "" +#: ../../source/ref-api/flwr.common.Status.rst:2 +#, fuzzy +msgid "Status" +msgstr "Statut du client." -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of -msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. 
recommends to set to " -"`expected_num_records/20`" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.configure.rst:2 +#, fuzzy +msgid "configure" +msgstr "Configurer les clients" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: logging.Logger.log:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." msgstr "" +"Pour transmettre des informations sur les exceptions, utilise l'argument " +"mot-clé exc_info avec une valeur vraie, par ex." 
-#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 -#, fuzzy -msgid "DifferentialPrivacyServerSideFixedClipping" -msgstr "Confidentialité différentielle" +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "serveur" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:22::1 msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." +msgstr "Serveur de Flower" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 -msgid "FedAdagrad" -msgstr "FedAdagrad" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +#, fuzzy +msgid "Flower server config." +msgstr "Serveur de Flower" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -#, fuzzy -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." 
+#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +#, fuzzy +msgid "Workflows." +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#, fuzzy +msgid "ClientManager" +msgstr "client" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." +msgstr "" + +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy -msgid "FedAdam" -msgstr "FedAdagrad" +msgid "Driver" +msgstr "serveur" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." 
+#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy -msgid "FedAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`run `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" -msgstr "" +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid "Run information." +msgstr "Simulation de moniteur" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of +#: flwr.server.driver.driver.Driver.create_message:3 of msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. 
Defaults to 1.0." +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of +#: flwr.server.driver.driver.Driver.create_message:6 of msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +"The content for the new message. This holds records that are to be sent " +"to the destination node." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:14 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:17 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:23 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**message** -- A new `Message` instance with the specified content and " +"metadata." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:3 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:3 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -#, fuzzy -msgid "FedAvgAndroid" -msgstr "DPFedAvgAdaptive" +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.push_messages:9 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:9 of msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -#, fuzzy -msgid "Deserialize NumPy array from bytes." -msgstr "Désérialise le tableau numérique NumPy à partir d'octets." +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:19 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "Évaluation centralisée" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of #, fuzzy -msgid "Serialize NumPy array to bytes." -msgstr "Sérialise le tableau numérique NumPy en octets." +msgid "Add metrics entries (from centralized evaluation)." +msgstr "Évaluation centralisée" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "Convert parameters object to NumPy weights." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "FedAvgM" -msgstr "DP-FedAvg" +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of -msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "Serveur" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -#, fuzzy -msgid "Aggregate fit results using median." -msgstr "Résultats globaux de l'évaluation." - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +#, fuzzy +msgid "Replace server strategy." +msgstr "stratégie.du.serveur" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#, fuzzy +msgid "ServerApp" +msgstr "serveur" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" -msgstr "" +#: flwr.server.server_app.ServerApp:5 of +#, fuzzy +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "Utilise une stratégie existante" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." -msgstr "" +#: flwr.server.server_app.ServerApp.main:1::1 of +#, fuzzy +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +#, fuzzy +msgid "ServerAppComponents" +msgstr "serveur" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:3 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:9 of +#, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." 
msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +":py:obj:`client_manager " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`server `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#, fuzzy +msgid "ServerConfig" +msgstr "serveur" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +"All attributes have default values which allows users to configure just " +"the ones they care about." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of -msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of -msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of -msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." 
+#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" -msgstr "" +#: ../../source/ref-api/flwr.server.start_server.rst:2 +#, fuzzy +msgid "start\\_server" +msgstr "serveur.start_server" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: flwr.server.app.start_server:5 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:12 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" +#: flwr.server.app.start_server:32 of +#, fuzzy +msgid "CA certificate." +msgstr "Certificats" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" +#: flwr.server.app.start_server:33 of +#, fuzzy +msgid "server certificate." +msgstr "Certificats" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: flwr.server.app.start_server:34 of +#, fuzzy +msgid "server private key." +msgstr "stratégie.du.serveur" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" -msgstr "" +#: flwr.server.app.start_server:42 of +#, fuzzy +msgid "Starting an insecure server:" +msgstr "Démarrer le serveur" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" +#: flwr.server.app.start_server:46 of +#, fuzzy +msgid "Starting an SSL-enabled server:" +msgstr "Démarrer le serveur" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:2 #, fuzzy -msgid "Aggregate evaluation metrics using average." -msgstr "Résultats globaux de l'évaluation." +msgid "strategy" +msgstr "stratégie.du.serveur" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +#, fuzzy +msgid "Bulyan strategy." +msgstr "Stratégies intégrées" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." 
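The `flwr.server.start_server` entries above ("Starting an insecure server", "Starting an SSL-enabled server") correspond to usage along these lines; the address, round count, and certificate file names are placeholder assumptions:

    from pathlib import Path

    import flwr as fl

    # Insecure (no TLS) server with the default FedAvg strategy.
    fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=fl.server.strategy.FedAvg(),
    )

    # SSL-enabled variant: pass (CA certificate, server certificate, private key)
    # as a tuple of bytes, in that order.
    # fl.server.start_server(
    #     server_address="0.0.0.0:8080",
    #     certificates=(
    #         Path("ca.crt").read_bytes(),
    #         Path("server.pem").read_bytes(),
    #         Path("server.key").read_bytes(),
    #     ),
    # )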
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 #: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 #: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 #: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of #, fuzzy -msgid "FedXgbNnAvg" -msgstr "DP-FedAvg" +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "" +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#, fuzzy +msgid "Federated Averaging strategy." +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +#, fuzzy +msgid "Federated Averaging with Momentum strategy." +msgstr "Stratégie de moyenne fédérée." 
-#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "Configuration de l'évaluation fédérée" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +#, fuzzy +msgid "Federated Optim strategy." +msgstr "Stratégie de moyenne fédérée." -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +#, fuzzy +msgid "Federated Optimization strategy." +msgstr "Stratégie de moyenne fédérée." -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "Aggregate fit results using Krum." -msgstr "Résultats globaux de l'évaluation." 
- -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -#, fuzzy -msgid "QFedAvg" -msgstr "DP-FedAvg" - -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of #, fuzzy -msgid "Strategy" -msgstr "stratégie.du.serveur" +msgid "Initial global model parameters." 
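The parameter entries above (client fractions, minimum client counts, optional callbacks) are shared by `FedAvg` and the strategies derived from it. A sketch with the documented defaults spelled out:

    from flwr.server.strategy import FedAvg

    strategy = FedAvg(
        fraction_fit=1.0,            # fraction of clients used during training
        fraction_evaluate=1.0,       # fraction of clients used during validation
        min_fit_clients=2,           # minimum clients used during training
        min_evaluate_clients=2,      # minimum clients used during validation
        min_available_clients=2,     # minimum total clients in the system
        evaluate_fn=None,            # optional function used for validation
        on_fit_config_fn=None,       # function used to configure training
        on_evaluate_config_fn=None,  # function used to configure validation
        accept_failures=True,        # accept rounds containing failures
        initial_parameters=None,     # initial global model parameters
    )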
+msgstr "Initialise le modèle global" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "Aggregate evaluation results." +msgid "Aggregate evaluation losses using weighted average." msgstr "Résultats globaux de l'évaluation." -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "Aggregate training results." +msgid "Aggregate fit results using Bulyan." msgstr "Résultats globaux de l'évaluation." 
-#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +#, fuzzy +msgid "Configure the next round of evaluation." 
+msgstr "Configuration de l'évaluation côté serveur" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 #: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -#, fuzzy -msgid "Evaluate the current model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." 
+msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "Initialize the (global) model parameters." +msgid "Initialize global model parameters." msgstr "Initialise le modèle global" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. 
For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of -msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of -msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of -msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." 
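The `Strategy.aggregate_fit` entries above explain that returning parameters replaces the global model, while returning `None` discards the round's updates. A small, hypothetical subclass (the name `LoggingFedAvg` is made up for illustration) shows where that return value comes from:

    from typing import Dict, List, Optional, Tuple, Union

    from flwr.common import FitRes, Parameters, Scalar
    from flwr.server.client_proxy import ClientProxy
    from flwr.server.strategy import FedAvg

    class LoggingFedAvg(FedAvg):
        """Sketch: observe what FedAvg aggregated in each round."""

        def aggregate_fit(
            self,
            server_round: int,
            results: List[Tuple[ClientProxy, FitRes]],
            failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
        ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
            parameters, metrics = super().aggregate_fit(server_round, results, failures)
            # Returning None here would make the server keep the previous
            # global parameters and discard this round's updates.
            print(f"round {server_round}: {len(results)} results, {len(failures)} failures")
            return parameters, metrics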
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "workflow" -msgstr "Flux de travail" +msgid "Aggregate evaluation losses using the given strategy." +msgstr "Résultats globaux de l'évaluation." -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of -msgid "The workflow for the SecAgg+ protocol." +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -#, fuzzy -msgid "DefaultWorkflow" -msgstr "Flux de travail" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of #, fuzzy -msgid "SecAggPlusWorkflow" -msgstr "Flux de travail" +msgid "The current round of federated learning." +msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of -msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. 
This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of -msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 #: of -msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 #: of -msgid "Too large `max_weight` may compromise the precision of the quantization." 
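The SecAgg+ entries above describe `num_shares`, `reconstruction_threshold`, and the quantization parameters. A minimal sketch of wiring the workflow together, with illustrative values:

    from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow

    # Plug the SecAgg+ fit workflow into the default Flower server workflow.
    # Higher num_shares tolerates more dropouts at extra computational cost;
    # a higher reconstruction_threshold strengthens privacy but tolerates
    # fewer dropouts.
    workflow = DefaultWorkflow(
        fit_workflow=SecAggPlusWorkflow(
            num_shares=3,
            reconstruction_threshold=2,
        )
    )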
+msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 #: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 #: of -msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +msgid "The noise multiplier for the Gaussian mechanism for model updates." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 #: of -msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +msgid "The number of clients that are sampled on each round." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 #: of msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 #: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 #: of -msgid "Execute the 'collect masked vectors' stage." +msgid "" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 #: of msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 #: of -msgid "Execute the 'setup' stage." -msgstr "" +#, fuzzy +msgid "Create a strategy:" +msgstr "stratégie.du.serveur" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 #: of msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 #: of -msgid "Execute the 'share keys' stage." 
+msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of -msgid "Execute the 'unmask' stage." +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of #, fuzzy -msgid "SecAggWorkflow" -msgstr "Flux de travail" +msgid "Aggregate training results and update clip norms." +msgstr "Résultats globaux de l'évaluation." -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. 
- 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of -msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "Confidentialité différentielle" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of -msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 #: of msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 #: of msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of #, fuzzy -msgid "simulation" -msgstr "Simulation de moniteur" +msgid "Add noise to the aggregated parameters." +msgstr "Puis sérialise le résultat agrégé :" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -#, fuzzy -msgid "Start a Ray-based Flower simulation server." -msgstr "Simulation de moniteur" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -#, fuzzy -msgid "run\\_simulation" -msgstr "Simulation de moniteur" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" -#: flwr.simulation.run_simulation.run_simulation:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 #, fuzzy -msgid "start\\_simulation" -msgstr "démarrer_simulation" +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "Confidentialité différentielle" -#: flwr.simulation.app.start_simulation:3 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." 
+":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." msgstr "" -#: flwr.simulation.app.start_simulation:45 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." 
-" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "Changelog" - -#: ../../source/ref-changelog.md:3 -#, fuzzy -msgid "v1.9.0 (2024-06-10)" -msgstr "v1.3.0 (2023-02-06)" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" +msgstr "FedAdagrad" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" -msgstr "Merci à nos contributeurs" +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 -msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +#, fuzzy +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" msgstr "" -"Nous tenons à remercier tout particulièrement tous les contributeurs qui " -"ont rendu possible la nouvelle version de Flower (dans l'ordre `git " -"shortlog`) :" +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:9 -msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." 
msgstr "" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" -msgstr "Quoi de neuf ?" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" -#: ../../source/ref-changelog.md:13 -#, fuzzy -msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:15 +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:17 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:19 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:21 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." 
+":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:23 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:25 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:27 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:31 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy -msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" +msgid "FedAdam" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." 
+msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:35 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:39 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:43 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:47 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:49 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#, fuzzy +msgid "FedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:51 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:55 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:57 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:61 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:65 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:67 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 #, fuzzy +msgid "FedAvgAndroid" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " 
-"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:71 -msgid "As always, Flower code examples have received many updates." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:73 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-changelog.md:75 
../../source/ref-changelog.md:1058 -msgid "Deprecations" -msgstr "Dépréciations" - -#: ../../source/ref-changelog.md:77 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of #, fuzzy -msgid "**Deprecate Python 3.8 support**" -msgstr "**Créer le PR**" +msgid "Deserialize NumPy array from bytes." +msgstr "Désérialise le tableau numérique NumPy à partir d'octets." -#: ../../source/ref-changelog.md:79 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:81 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:83 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:85 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:87 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" -msgstr "Changements incompatibles" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#, fuzzy +msgid "Serialize NumPy array to bytes." +msgstr "Sérialise le tableau numérique NumPy en octets." 
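> Editor's note: the `FedAvgAndroid` entries above reference the `ndarray_to_bytes` and `bytes_to_ndarray` helpers ("Serialize NumPy array to bytes." / "Deserialize NumPy array from bytes."). The catalog only carries the docstring summaries, not the implementation, so the snippet below is a minimal illustrative sketch of what such a round-trip typically looks like with plain NumPy; it is not the actual Flower code.

```python
from io import BytesIO

import numpy as np


def ndarray_to_bytes(ndarray: np.ndarray) -> bytes:
    """Serialize a NumPy array to bytes (illustrative sketch only)."""
    buffer = BytesIO()
    np.save(buffer, ndarray, allow_pickle=False)
    return buffer.getvalue()


def bytes_to_ndarray(tensor: bytes) -> np.ndarray:
    """Deserialize a NumPy array from bytes (illustrative sketch only)."""
    return np.load(BytesIO(tensor), allow_pickle=False)


# Round-trip check: serialize, deserialize, and compare.
original = np.arange(6, dtype=np.float32).reshape(2, 3)
assert np.array_equal(original, bytes_to_ndarray(ndarray_to_bytes(original)))
```

Serializing to an in-memory buffer keeps the sketch dependency-free; the real helpers may use a different wire format.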
-#: ../../source/ref-changelog.md:91 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: ../../source/ref-changelog.md:93 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:95 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Documentation restructurée** " -"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:97 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:99 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of #, fuzzy -msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +msgid "Convert parameters object to NumPy weights." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#, fuzzy +msgid "FedAvgM" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:101 +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." msgstr "" -#: ../../source/ref-changelog.md:103 -#, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +msgstr "" -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:113 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:115 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:117 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:119 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:121 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" -msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-changelog.md:123 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:125 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:127 -msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: ../../source/ref-changelog.md:129 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:131 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:133 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of #, fuzzy +msgid "Aggregate fit results using median." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:135 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:137 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:139 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:141 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:143 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:145 -#, fuzzy -msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:147 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:149 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:151 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:153 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:155 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:157 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:159 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:161 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " 
-"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" -msgstr "Aucun" +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" +msgstr "" -#: ../../source/ref-changelog.md:167 -#, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "" -#: ../../source/ref-changelog.md:173 +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: ../../source/ref-changelog.md:177 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:179 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." 
+"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." msgstr "" -#: ../../source/ref-changelog.md:181 -#, fuzzy -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:183 -msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: ../../source/ref-changelog.md:185 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:187 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -#: ../../source/ref-changelog.md:189 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:191 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:193 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:195 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. 
This will " -"ensure better support for users using more recent Python versions." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:197 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:199 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:201 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:203 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:205 -#, fuzzy -msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:207 -#, fuzzy +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
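> Editor's note: the `FedProx` docstring entries above describe the proximal term \frac{\mu}{2} || w - w^t ||^2 and state that, in PyTorch, only the client-side loss changes ("the loss would go from:" / "To:"), with `global_params` being a copy of the parameters before training; the code blocks themselves are not extracted into this catalog. The sketch below is one way to write that loss, consistent with the formula; the tiny model, dummy batch, and the names `net`, `criterion`, `global_params`, and `mu` are assumptions for illustration, not Flower API.

```python
import torch
import torch.nn as nn

# Stand-ins for the client's real training loop (assumptions, not Flower API).
torch.manual_seed(0)
net = nn.Linear(4, 2)
criterion = nn.CrossEntropyLoss()
inputs, labels = torch.randn(8, 4), torch.randint(0, 2, (8,))
mu = 0.1  # proximal coefficient; 0.0 recovers plain FedAvg behaviour

# Copy of the global parameters w^t received from the server before training.
global_params = [p.detach().clone() for p in net.parameters()]

# FedAvg-style loss would simply be:
# loss = criterion(net(inputs), labels)

# FedProx adds the proximal term (mu / 2) * ||w - w^t||^2:
proximal_term = sum(
    (local - global_w).norm(2) ** 2
    for local, global_w in zip(net.parameters(), global_params)
)
loss = criterion(net(inputs), labels) + (mu / 2) * proximal_term
loss.backward()
```

As the docstring notes, larger `mu` pulls the local weights closer to the server weights during training, while the server-side strategy remains FedAvg-like.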
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:209 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:211 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:213 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:215 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:217 -msgid "Many Flower code examples received substantial updates." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 -#, fuzzy -msgid "**Update Flower Baselines**" -msgstr "Demande pour une nouvelle Flower Baseline" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-changelog.md:221 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:222 -#, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:223 -#, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:224 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +msgid "Aggregate evaluation metrics using average." +msgstr "Résultats globaux de l'évaluation." -#: ../../source/ref-changelog.md:225 -#, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-changelog.md:226 -#, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." 
msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:228 -#, fuzzy +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:230 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:232 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:234 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:236 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`num_evaluation_clients " +"`\\ " 
+"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:240 -#, fuzzy +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:242 -msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: ../../source/ref-changelog.md:244 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:246 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." 
+":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:248 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:250 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:252 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:256 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:258 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:260 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 #, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:266 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -#: ../../source/ref-changelog.md:270 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:272 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:276 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:280 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:282 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:284 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:286 -msgid "Add gRPC request-response capability to the Android SDK." +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: ../../source/ref-changelog.md:288 -#, fuzzy -msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -#: ../../source/ref-changelog.md:292 -#, fuzzy +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:294 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. 
The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:296 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:298 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:300 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:302 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:304 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." 
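The changelog entries above describe the unified client API: `NumPyClient` instances are no longer launched through `start_numpy_client`, but converted with `.to_client()` and handed to `start_client`. A minimal sketch of that flow (the client methods and the server address are placeholders, not part of this patch):

```python
import flwr as fl


class FlowerClient(fl.client.NumPyClient):
    """Placeholder NumPyClient; real projects return model weights and metrics."""

    def get_parameters(self, config):
        return []

    def fit(self, parameters, config):
        return parameters, 0, {}

    def evaluate(self, parameters, config):
        return 0.0, 0, {}


# Convert the NumPyClient and start it via the consolidated entry point.
fl.client.start_client(
    server_address="127.0.0.1:8080",  # assumed local server address
    client=FlowerClient().to_client(),
)
```

Per the "Unify client API" entry, the same client class can also be reused in simulation by returning it from a `client_fn`, without changing how it is defined.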
-#: ../../source/ref-changelog.md:306 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 -#, fuzzy +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" +msgstr "" + +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "" + +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:314 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:316 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:318 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of #, fuzzy +msgid "Aggregate fit results using Krum." +msgstr "Résultats globaux de l'évaluation." 
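The `Krum` docstrings above note that `num_clients_to_keep=0` yields classical Krum, while a positive value switches to MultiKrum (averaging over the kept updates). A hedged sketch of wiring that up; the values are illustrative and `num_malicious_clients` is assumed to be the strategy's constructor name for the Byzantine-client bound:

```python
from flwr.server import ServerConfig, start_server
from flwr.server.strategy import Krum

strategy = Krum(
    min_fit_clients=5,
    min_available_clients=5,
    num_malicious_clients=1,  # assumed bound on Byzantine updates per round
    num_clients_to_keep=3,    # > 0 -> MultiKrum: average the 3 best-scored updates
)

start_server(
    server_address="0.0.0.0:8080",
    config=ServerConfig(num_rounds=3),
    strategy=strategy,
)
```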
+ +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:320 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:322 -#, fuzzy -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-changelog.md:324 -#, fuzzy -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:326 -#, fuzzy -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:328 -#, fuzzy -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:330 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 #, fuzzy -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +msgid "QFedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:332 -#, fuzzy -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:334 -#, fuzzy -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-changelog.md:336 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"FedBN 
([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:338 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:340 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:342 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " 
-"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:344 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:346 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 -msgid "Flower received many improvements under the hood, too many to list here." +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#, fuzzy +msgid "Strategy" +msgstr "stratégie.du.serveur" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " -"pour être énumérées ici." 
-#: ../../source/ref-changelog.md:352 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of #, fuzzy +msgid "Aggregate evaluation results." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:354 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate training results." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:356 -#, fuzzy +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:358 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:360 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of #, fuzzy -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.4.0 (2023-04-21)" +msgid "Evaluate the current model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-changelog.md:366 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:370 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of #, fuzzy +msgid "Initialize the (global) model parameters." 
+msgstr "Initialise le modèle global" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:372 -msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -#: ../../source/ref-changelog.md:374 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." 
msgstr "" -#: ../../source/ref-changelog.md:376 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:378 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-changelog.md:380 -#, fuzzy +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." 
msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:382 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-changelog.md:384 -#, fuzzy +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:386 +#: ../../source/ref-api/flwr.server.workflow.rst:2 +#, fuzzy +msgid "workflow" +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:388 -#, fuzzy -msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." 
msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:390 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:392 -#, fuzzy -msgid "**Deprecate Python 3.7**" -msgstr "**Créer le PR**" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." +msgstr "" -#: ../../source/ref-changelog.md:394 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: ../../source/ref-changelog.md:396 -#, fuzzy -msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/ref-changelog.md:398 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 #, fuzzy -msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." -msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." +msgid "DefaultWorkflow" +msgstr "Flux de travail" -#: ../../source/ref-changelog.md:400 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 #, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. 
Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:402 -msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -#: ../../source/ref-changelog.md:404 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:406 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -#: ../../source/ref-changelog.md:408 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:410 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: ../../source/ref-changelog.md:412 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." 
+"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:414 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:416 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-changelog.md:418 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:420 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." 
msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:422 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-changelog.md:424 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:426 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." msgstr "" -#: ../../source/ref-changelog.md:428 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:430 -#, fuzzy -msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." 
+msgstr "" -#: ../../source/ref-changelog.md:432 -msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: ../../source/ref-changelog.md:434 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -#: ../../source/ref-changelog.md:436 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" - -#: ../../source/ref-changelog.md:450 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-changelog.md:454 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " -"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-changelog.md:456 -msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " -"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" et un [exemple de code] " -"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " -"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " -"XGBoost." 
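The `Strategy` docstrings earlier in this hunk spell out the aggregation contract: `aggregate_fit` and `aggregate_evaluate` receive `(ClientProxy, FitRes)` / `(ClientProxy, EvaluateRes)` pairs plus a list of failures, and returning `None` from `aggregate_fit` keeps the previous global parameters. A minimal sketch that illustrates the contract by extending the built-in `FedAvg`; the class name and the logging are inventions for this note:

```python
from typing import Dict, List, Optional, Tuple, Union

from flwr.common import FitRes, Parameters, Scalar
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy import FedAvg


class LoggingFedAvg(FedAvg):
    """FedAvg that reports how many updates and failures each round produced."""

    def aggregate_fit(
        self,
        server_round: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        # Clients that dropped out appear in `failures`, not in `results`.
        print(f"round {server_round}: {len(results)} updates, {len(failures)} failures")
        if not results:
            # Returning None keeps the previous global parameters unchanged.
            return None, {}
        return super().aggregate_fit(server_round, results, failures)
```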
-#: ../../source/ref-changelog.md:458 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:460 -msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " -"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous " -"disposons désormais d'un SDK swift iOS présent sous " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" qui facilitera grandement le processus de création d'applications. Pour " -"présenter son utilisation, l'[exemple " -"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " -"été mis à jour !" -#: ../../source/ref-changelog.md:462 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" -" \"** ([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" -#: ../../source/ref-changelog.md:464 -#, fuzzy -msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." msgstr "" -"Un nouveau [tutoriel d'entrée de gamme] " -"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " -"documentation explique les bases de l'apprentissage fédéré. Il permet à " -"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" -" voyage avec Flower. Fais-le suivre à tous ceux qui s'intéressent à " -"l'apprentissage fédéré !" 
-#: ../../source/ref-changelog.md:466 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:468 -msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." msgstr "" -"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " -"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " -"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " -"qui vise à rendre la convergence plus robuste dans des contextes " -"hétérogènes." -#: ../../source/ref-changelog.md:470 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:472 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -"Cette nouvelle ligne de base reproduit une expérience évaluant les " -"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" -" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " -"2018)] (https://arxiv.org/abs/1812.01097)." -#: ../../source/ref-changelog.md:474 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:476 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -"Une nouvelle API REST a été introduite comme alternative à la pile de " -"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " -"prend en charge que les clients anonymes." -#: ../../source/ref-changelog.md:478 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:480 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." 
msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:482 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -"L'API du pilote est encore une fonction expérimentale, mais cette version" -" introduit quelques améliorations majeures. L'une des principales " -"améliorations est l'introduction d'une base de données SQLite pour " -"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " -"autre amélioration est que les tâches (instructions ou résultats) qui ont" -" été livrées seront désormais supprimées, ce qui améliore " -"considérablement l'efficacité de la mémoire d'un serveur Flower " -"fonctionnant depuis longtemps." -#: ../../source/ref-changelog.md:484 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -"**Répare les problèmes de déversement liés à Ray pendant les " -"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/ref-changelog.md:486 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -"Lors de l'exécution de longues simulations, `ray` déversait parfois " -"d'énormes quantités de données qui rendaient l'entraînement incapable de " -"continuer. ce problème est maintenant corrigé ! 
🎉" -#: ../../source/ref-changelog.md:488 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-changelog.md:490 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"TabNet est un cadre puissant et flexible pour former des modèles " -"d'apprentissage automatique sur des données tabulaires. Nous avons " -"maintenant un exemple fédéré utilisant Flower : [quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)." -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -"**Ajouter un nouveau guide pratique pour le suivi des simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." +msgstr "" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -"Nous avons maintenant un guide de documentation pour aider les " -"utilisateurs à surveiller leurs performances pendant les simulations." -#: ../../source/ref-changelog.md:496 +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "Simulation de moniteur" + +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -"**Ajouter des mesures de formation à** `History` **objet pendant les " -"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:498 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" 
+"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " -"les mesures d'entraînement, mais les versions précédentes " -"n'enregistraient pas les résultats dans l'objet `History`. c'est " -"désormais le cas !" -#: ../../source/ref-changelog.md:500 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." 
msgstr "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/ada" -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." +msgstr "" -#: ../../source/ref-changelog.md:514 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/ref-changelog.md:518 +#: flwr.simulation.run_simulation.run_simulation:21 of msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " -"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:520 +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +"When disabled, only INFO, WARNING and ERROR log messages will be shown. 
" +"If enabled, DEBUG-level logs will be displayed." msgstr "" -"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " -"qui peut être utilisé pour identifier la charge de travail à laquelle une" -" tâche appartient. Elle prend également en charge un nouveau `group_id` " -"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " -"en cours. Le `workload_id` et le `group_id` permettent tous deux aux " -"nœuds clients de décider s'ils veulent traiter une tâche ou non." -#: ../../source/ref-changelog.md:522 +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "démarrer_simulation" + +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "Changelog" + +#: ../../source/ref-changelog.md:3 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" +msgstr "Merci à nos contributeurs" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " -"flotte soit configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"Nous tenons à remercier tout particulièrement tous les contributeurs qui " +"ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" -#: ../../source/ref-changelog.md:524 +#: ../../source/ref-changelog.md:9 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " -"API) peut maintenant configurer l'adresse du serveur de Driver API (via " -"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " -"de son démarrage :" -#: ../../source/ref-changelog.md:526 +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "Améliorations facultatives" + +#: ../../source/ref-changelog.md:13 #, fuzzy msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" -"address \"0.0.0.0:8086\" ``" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." +#: ../../source/ref-changelog.md:15 +#, fuzzy +msgid "" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" +msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:530 +#: ../../source/ref-changelog.md:17 +#, fuzzy msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:532 +#: ../../source/ref-changelog.md:19 +#, fuzzy msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " -"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " -"ici : [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)." 
+"**Nouvel exemple de code pour les Transformers à visage embrassant** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:534 +#: ../../source/ref-changelog.md:21 +#, fuzzy msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " -"dernières versions d'Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:536 +#: ../../source/ref-changelog.md:23 #, fuzzy msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -"L'exemple de code Android a reçu une mise à jour substantielle : le " -"projet est compatible avec Flower 1.0 et les versions ultérieures, " -"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " -"est mis à jour pour être compatible avec les outils Android les plus " -"récents." +"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:538 +#: ../../source/ref-changelog.md:25 +#, fuzzy msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" +msgstr "" +"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" + +#: ../../source/ref-changelog.md:27 +#, fuzzy +msgid "" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" +msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" + +#: ../../source/ref-changelog.md:29 +#, fuzzy +msgid "" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" "**Ajouter une nouvelle stratégie `FedProx`** " "([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:540 +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" +msgstr "Changements incompatibles" + +#: ../../source/ref-changelog.md:35 +#, fuzzy +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:41 msgid "" -"This " 
-"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" +msgstr "Quoi de neuf ?" + +#: ../../source/ref-changelog.md:45 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -"Cette " -"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" est presque identique à " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " -"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " -"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" -" rapport aux modèles globaux." -#: ../../source/ref-changelog.md:542 +#: ../../source/ref-changelog.md:47 msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +"Dynamic code updates are here! 
`flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -"**Ajouter de nouvelles métriques aux événements de télémétrie** " -"([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:49 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." +msgstr "" + +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." msgstr "" -"Une structure d'événements mise à jour permet, par exemple, de regrouper " -"des événements au sein d'une même charge de travail." -#: ../../source/ref-changelog.md:546 +#: ../../source/ref-changelog.md:53 +#, fuzzy msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -"**Ajouter une nouvelle section de tutoriel sur les stratégies " -"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:548 +#: ../../source/ref-changelog.md:55 +msgid "" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." 
+msgstr "" + +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" +msgstr "" + +#: ../../source/ref-changelog.md:59 +msgid "" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." +msgstr "" + +#: ../../source/ref-changelog.md:60 +msgid "" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." +msgstr "" + +#: ../../source/ref-changelog.md:61 +msgid "" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." +msgstr "" + +#: ../../source/ref-changelog.md:63 #, fuzzy msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" -" : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" -"-Strategy-PyTorch.ipynb)" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:550 +#: ../../source/ref-changelog.md:65 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -"**Ajouter une nouvelle section de tutoriel sur la sérialisation " -"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/ref-changelog.md:552 +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." +msgstr "" + +#: ../../source/ref-changelog.md:68 +msgid "" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." +msgstr "" + +#: ../../source/ref-changelog.md:69 +msgid "" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." 
+msgstr "" + +#: ../../source/ref-changelog.md:70 +msgid "" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" +msgstr "" + +#: ../../source/ref-changelog.md:72 #, fuzzy msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la sérialisation personnalisée : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" -"-Client-and-NumPyClient-PyTorch.ipynb)" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:554 +#: ../../source/ref-changelog.md:74 msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." 
msgstr "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/ada" -#: ../../source/ref-changelog.md:558 +#: ../../source/ref-changelog.md:76 +#, fuzzy msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" "**Mise à jour de la documentation** " "([#1629](https://github.com/adap/flower/pull/1629), " @@ -18411,13002 +17238,18616 @@ msgstr "" "[#1613](https://github.com/adap/flower/pull/1613), " "[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 +#: ../../source/ref-changelog.md:78 msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une " -"autre étape dans notre effort pour faire de la documentation de Flower la" -" meilleure documentation de tout projet. Reste à l'écoute et comme " -"toujours, n'hésite pas à nous faire part de tes commentaires !" -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" - -#: ../../source/ref-changelog.md:572 +#: ../../source/ref-changelog.md:80 +#, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
-" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -"adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " -"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:576 +#: ../../source/ref-changelog.md:82 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" -"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:84 +#, fuzzy msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -"Au cours des prochaines semaines, nous publierons un certain nombre de " -"nouvelles implémentations de référence utiles en particulier pour les " -"nouveaux venus en FL. Elles revisiteront généralement des articles bien " -"connus de la littérature, et seront adaptées à l'intégration dans votre " -"propre application ou à l'expérimentation, afin d'approfondir votre " -"connaissance de FL en général. La publication d'aujourd'hui est la " -"première de cette série. [Lire la " -"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" -"cnn/)" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:86 msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
msgstr "" -"**Améliorer la prise en charge des GPU dans les simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:582 +#: ../../source/ref-changelog.md:88 +#, fuzzy msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " -"jour pour améliorer la prise en charge des GPU. La mise à jour inclut " -"certaines des leçons durement apprises lors de la mise à l'échelle des " -"simulations dans des environnements de grappes de GPU. De nouveaux " -"paramètres par défaut rendent l'exécution des simulations basées sur les " -"GPU beaucoup plus robuste." +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:584 +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." +msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " 
+"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" +msgstr "Dépréciations" + +#: ../../source/ref-changelog.md:102 +#, fuzzy msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " -"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:104 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " -"toujours été faciles à utiliser sur les instances GPU. Nous les avons " -"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! " -"Découvre les carnets mis à jour ici :" -#: ../../source/ref-changelog.md:588 +#: ../../source/ref-changelog.md:113 #, fuzzy msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -"[Une introduction à l'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:589 -#, fuzzy +#: ../../source/ref-changelog.md:115 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." 
msgstr "" -"[Stratégies d'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:117 #, fuzzy msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" +msgstr "" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" + +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -"[Construire une stratégie] " -"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" -"PyTorch.html)" -#: ../../source/ref-changelog.md:591 +#: ../../source/ref-changelog.md:121 #, fuzzy msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" -"and-NumPyClient-PyTorch.html)" +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:593 +#: ../../source/ref-changelog.md:123 msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:595 -msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -"À la suite d'une [demande de commentaires] " -"(https://github.com/adap/flower/issues/1534) de la part de la communauté," -" le projet open-source Flower introduit la collecte optionnelle de " -"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " -"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " -"comment Flower est utilisé et quels sont les défis auxquels les " -"utilisateurs peuvent être confrontés." -#: ../../source/ref-changelog.md:597 +#: ../../source/ref-changelog.md:135 #, fuzzy msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." 
+"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** Restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " -"suite.](https://flower.ai/docs/telemetry.html)." +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:599 +#: ../../source/ref-changelog.md:137 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:601 +#: ../../source/ref-changelog.md:139 +#, fuzzy +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" + +#: ../../source/ref-changelog.md:145 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" -" permettra de créer des applications Federated Learning et Federated " -"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, " -"c'est beaucoup ! À l'avenir, l'API de pilote sera l'abstraction sur " -"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" -" peux commencer à construire ces choses dès maintenant, aussi." -#: ../../source/ref-changelog.md:603 +#: ../../source/ref-changelog.md:149 +#, fuzzy msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." 
+"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -"L'API du pilote permet également un nouveau mode d'exécution dans lequel " -"le serveur s'exécute indéfiniment. Plusieurs charges de travail " -"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " -"leur exécution indépendamment du serveur. Ceci est particulièrement utile" -" pour les utilisateurs qui souhaitent déployer Flower en production." +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:605 +#: ../../source/ref-changelog.md:151 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " -"attendons tes commentaires avec impatience !" 
-#: ../../source/ref-changelog.md:607 +#: ../../source/ref-changelog.md:153 +#, fuzzy msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -"Remarque : *L'API du pilote est encore expérimentale et est susceptible " -"de changer de manière significative au fil du temps.*" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:609 +#: ../../source/ref-changelog.md:155 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/ref-changelog.md:611 +#: ../../source/ref-changelog.md:157 +#, fuzzy msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " -"fédérée avec Pandas et Flower. 
Tu peux le trouver ici : [quickstart-"
-"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-"
-"pandas)."
+"Mettre à jour les outils de développement "
+"([#1231](https://github.com/adap/flower/pull/1231), "
+"[#1276](https://github.com/adap/flower/pull/1276), "
+"[#1301](https://github.com/adap/flower/pull/1301), "
+"[#1310](https://github.com/adap/flower/pull/1310))"

-#: ../../source/ref-changelog.md:613
+#: ../../source/ref-changelog.md:159
 msgid ""
-"**Add new strategies: Krum and MultiKrum** "
-"([#1481](https://github.com/adap/flower/pull/1481))"
+"The `client_fn` signature has been generalized to `client_fn(context: "
+"Context) -> Client`. It now receives a `Context` object instead of the "
+"(now deprecated) `cid: str`. `Context` allows accessing `node_id`, "
+"`node_config` and `run_config`, among other things. This enables you to "
+"build a configurable `ClientApp` that leverages the new run config "
+"system."
msgstr ""
-"**Ajouter de nouvelles stratégies : Krum et MultiKrum** "
-"([#1481](https://github.com/adap/flower/pull/1481))"

-#: ../../source/ref-changelog.md:615
+#: ../../source/ref-changelog.md:161
 msgid ""
-"Edoardo, a computer science student at the Sapienza University of Rome, "
-"contributed a new `Krum` strategy that enables users to easily use Krum "
-"and MultiKrum in their workloads."
+"The previous signature `client_fn(cid: str)` is now deprecated and "
+"support for it will be removed in a future release. Use "
+"`client_fn(context: Context) -> Client` everywhere."
msgstr ""
-"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a "
-"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs "
-"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail."

-#: ../../source/ref-changelog.md:617
+#: ../../source/ref-changelog.md:163
+#, fuzzy
 msgid ""
-"**Update C++ example to be compatible with Flower v1.2.0** "
-"([#1495](https://github.com/adap/flower/pull/1495))"
+"**Introduce new** `server_fn(context)` "
+"([#3773](https://github.com/adap/flower/pull/3773), "
+"[#3796](https://github.com/adap/flower/pull/3796), "
+"[#3771](https://github.com/adap/flower/pull/3771))"
msgstr ""
-"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower "
-"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))"
+"**Introduire la télémétrie optionnelle** "
+"([#1533](https://github.com/adap/flower/pull/1533), "
+"[#1544](https://github.com/adap/flower/pull/1544), "
+"[#1584](https://github.com/adap/flower/pull/1584))"

-#: ../../source/ref-changelog.md:619
+#: ../../source/ref-changelog.md:165
 msgid ""
-"The C++ code example has received a substantial update to make it "
-"compatible with the latest version of Flower."
+"In addition to the new `client_fn(context: Context)`, a new "
+"`server_fn(context: Context) -> ServerAppComponents` can now be passed to"
+" `ServerApp` (instead of passing, for example, `Strategy`, directly). "
+"This enables you to leverage the full `Context` on the server-side to "
+"build a configurable `ServerApp`."
 msgstr ""
-"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre"
-" compatible avec la dernière version de Flower."
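
Taken together, the run config entry (ref-changelog.md:155) and the generalized `client_fn`/`server_fn` entries above describe one pattern: read configuration from `Context` on both the client and the server side. A compact sketch of how the pieces could fit together is shown below; the `learning-rate` and `num-server-rounds` keys, the `FlowerClient` class, and the `FedAvg` choice are illustrative assumptions, not values mandated by the changelog.

```python
from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg


class FlowerClient(NumPyClient):
    """Placeholder client; a real one would implement fit/evaluate."""

    def __init__(self, lr: float):
        self.lr = lr


def client_fn(context: Context):
    # New signature: a Context instead of the deprecated `cid: str`.
    # Values set under [tool.flwr.app.config] in pyproject.toml show up in
    # context.run_config; static SuperNode settings are in context.node_config.
    lr = float(context.run_config["learning-rate"])
    return FlowerClient(lr).to_client()


def server_fn(context: Context) -> ServerAppComponents:
    # Server-side counterpart: return Strategy/ServerConfig wrapped in
    # ServerAppComponents instead of passing them to ServerApp directly.
    num_rounds = int(context.run_config["num-server-rounds"])
    return ServerAppComponents(
        strategy=FedAvg(),
        config=ServerConfig(num_rounds=num_rounds),
    )


client_app = ClientApp(client_fn=client_fn)
server_app = ServerApp(server_fn=server_fn)
```

In a project generated with `flwr new`, these objects would live in separate modules that `pyproject.toml` points at; a value such as `learning-rate` could then be overridden at run time with `flwr run --run-config learning-rate=0.02`, as the run config entry describes.
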
-#: ../../source/ref-changelog.md:621 +#: ../../source/ref-changelog.md:167 +#, fuzzy msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:625 +#: ../../source/ref-changelog.md:169 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." 
msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:629 +#: ../../source/ref-changelog.md:171 +#, fuzzy msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -"L'un des points forts est le nouveau [guide du premier contributeur] " -"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" -" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" - -#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:639 +#: ../../source/ref-changelog.md:173 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -"Nous aimerions **remercier tout particulièrement** tous les contributeurs" -" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " -"shortlog`) :" -#: ../../source/ref-changelog.md:641 +#: ../../source/ref-changelog.md:175 +#, fuzzy msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:645 +#: ../../source/ref-changelog.md:177 msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:647 +#: ../../source/ref-changelog.md:179 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -"Le premier aperçu (expérimental) des wrappers enfichables de " -"confidentialité différentielle permet de configurer et d'utiliser " -"facilement la confidentialité différentielle (DP). Les wrappers DP " -"enfichables permettent une utilisation agnostique du cadre **et** de la " -"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " -"voir les documents de Flower, un nouvel explicatif va plus loin dans les " -"détails." -#: ../../source/ref-changelog.md:649 +#: ../../source/ref-changelog.md:181 msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:651 +#: ../../source/ref-changelog.md:183 +#, fuzzy msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " -"clients Flower peuvent être construits pour iOS. L'exemple de code " -"contient à la fois des composants Flower iOS SDK qui peuvent être " -"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " -"sur CoreML." +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:653 +#: ../../source/ref-changelog.md:185 msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -"**Nouvelle stratégie de FedMedian** " -"([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/ref-changelog.md:655 +#: ../../source/ref-changelog.md:187 +#, fuzzy msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:657 +#: ../../source/ref-changelog.md:189 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:659 +#: ../../source/ref-changelog.md:191 +#, fuzzy msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." 
+"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -"Toutes les exceptions `Client` qui se produisent dans le VCE sont " -"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" -" `Stratégie` configurée (via l'argument `failures`)." - -#: ../../source/ref-changelog.md:661 -msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "" -"**Améliorer le moteur du client virtuel** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:663 +#: ../../source/ref-changelog.md:193 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " -"dictionnaire `client_resources` a été remplacé par `float` pour permettre" -" l'allocation de fractions de ressources." -#: ../../source/ref-changelog.md:665 +#: ../../source/ref-changelog.md:195 +#, fuzzy msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:667 +#: ../../source/ref-changelog.md:197 msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -"Le moteur de client virtuel prend désormais en charge les méthodes " -"optionnelles `Client` (et `NumPyClient`)." 
-#: ../../source/ref-changelog.md:669 +#: ../../source/ref-changelog.md:199 +#, fuzzy msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -"**Fournir des informations de type aux paquets en utilisant** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:671 +#: ../../source/ref-changelog.md:201 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " -"indiquant que le paquet est typé. Cela permet de prendre en charge le " -"typage pour les projets ou les paquets qui utilisent `flwr` en leur " -"permettant d'améliorer leur code à l'aide de vérificateurs de types " -"statiques comme `mypy`." -#: ../../source/ref-changelog.md:673 +#: ../../source/ref-changelog.md:203 +#, fuzzy msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" + +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "Améliorations facultatives" -#: ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:209 +#, fuzzy msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " -"mis à jour pour fonctionner avec la dernière version de Flower." 
+"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:677 +#: ../../source/ref-changelog.md:211 +#, fuzzy msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -"**Documentation mise à jour** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:679 +#: ../../source/ref-changelog.md:213 +#, fuzzy msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." 
+"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -"Il y a eu tellement de mises à jour de la documentation que cela n'a même" -" pas de sens de les énumérer individuellement." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:681 +#: ../../source/ref-changelog.md:215 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -"**Documentation restructurée** " -"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:683 -msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -"La documentation a été restructurée pour faciliter la navigation. Ce " -"n'est que la première étape d'un effort plus important visant à faire de " -"la documentation de Flower la meilleure documentation de tous les projets" -#: ../../source/ref-changelog.md:685 +#: ../../source/ref-changelog.md:221 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -"**Ouvrir dans le bouton Colab** " -"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:687 +#: ../../source/ref-changelog.md:223 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." 
+"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " -"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " -"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " -"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " -"il te suffit d'un simple clic." -#: ../../source/ref-changelog.md:689 +#: ../../source/ref-changelog.md:225 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:691 +#: ../../source/ref-changelog.md:229 +#, fuzzy msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" -" parties couvrant les stratégies personnalisées (encore WIP) et la " -"distinction entre `Client` et `NumPyClient`. Les parties un et deux " -"existantes ont également été améliorées (beaucoup de petits changements " -"et de corrections)." - -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:699 -msgid "Highlights" -msgstr "Points forts" +#: ../../source/ref-changelog.md:231 +msgid "" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." 
+msgstr "" -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "Moteur de client virtuel stable** (accessible via `start_simulation`)" +#: ../../source/ref-changelog.md:233 +#, fuzzy +msgid "" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" +msgstr "" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" +#: ../../source/ref-changelog.md:235 +msgid "" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." +msgstr "" -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" -msgstr "`get_parameters` configurable" +#: ../../source/ref-changelog.md:237 +#, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:243 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -"Des tonnes de petits nettoyages d'API résultant en une expérience plus " -"cohérente pour les développeurs" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:247 +#, fuzzy msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -"Nous tenons à remercier **particulièrement** tous les contributeurs qui " -"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors)) :" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:710 
+#: ../../source/ref-changelog.md:249 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." 
msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:251 msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -"**Tous les arguments doivent être passés comme des arguments de mot-clé**" -" ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:253 #, fuzzy msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"Le code qui utilise des arguments positionnels (par exemple, " -"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" -"clé pour chaque argument positionnel (par exemple, " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." 
+"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:255 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -"**Introduire l'objet de configuration** `ServerConfig` **dans** " -"`start_server` **et** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:257 +#, fuzzy msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " -"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " -"attendent maintenant un objet de configuration de type " -"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que " -"le dict de configuration précédent, mais il rend l'écriture de code " -"sécurisé plus facile et les valeurs des paramètres par défaut plus " -"transparentes." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:259 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." 
msgstr "" -"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:261 +#, fuzzy msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -"Les paramètres de stratégie intégrés suivants ont été renommés pour " -"améliorer la lisibilité et la cohérence avec d'autres API :" - -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" - -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:263 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -"**Mettre à jour les arguments par défaut des stratégies intégrées** " -"([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:265 +#, fuzzy msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" -" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " -"les clients actuellement disponibles pour l'entraînement et l'évaluation." 
-" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " -"peuvent retrouver le comportement antérieur en initialisant la stratégie " -"de la manière suivante :" - -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:736 +#: ../../source/ref-changelog.md:267 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -"**Ajouter** `server_round` **à** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:738 +#: ../../source/ref-changelog.md:269 +#, fuzzy msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre." +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:271 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" -" ([#1334](https://github.com/adap/flower/pull/1334))" - -#: ../../source/ref-changelog.md:742 -msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." 
-msgstr "" -"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " -"maintenant trois paramètres : (1) le cycle actuel " -"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" -" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" -" (`config`)." -#: ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:273 +#, fuzzy msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:275 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " -"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " -"`aggregate_evaluate`) reçoivent le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" -" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " -"renommé de `rnd` à `server_round`." -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:277 +#, fuzzy msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" - -#: ../../source/ref-changelog.md:750 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." 
+"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:279 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:281 +#, fuzzy msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " -"`FedFSv0`, `FedFSv1`)." +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:283 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:285 +#, fuzzy msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"`flwr.common.Weights` a été renommé en `flwr.common.NDArys` pour mieux " -"rendre compte de la nature de ce type." +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:287 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." 
msgstr "" -"**Supprimez l'ancien** `force_final_distributed_eval` **de** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:762 +#: ../../source/ref-changelog.md:289 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " -"été un artefact historique, dans cette version il a finalement disparu " -"pour de bon." -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:291 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:293 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." 
+"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -"La méthode `get_parameters` accepte maintenant un dictionnaire de " -"configuration, tout comme `get_properties`, `fit`, et `evaluate`." -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:295 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" -" `config` **paramètre** " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:297 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -"La fonction `start_simulation` accepte maintenant un dictionnaire de " -"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" -" cohérence entre `start_simulation` et `start_server` et facilite la " -"transition entre les deux." -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:299 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:301 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -"La version précédente de Flower a introduit la prise en charge " -"expérimentale de Python 3.10, cette version déclare la prise en charge de" -" Python 3.10 comme stable." 
-#: ../../source/ref-changelog.md:778 +#: ../../source/ref-changelog.md:303 +#, fuzzy msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " -"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:780 -msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." msgstr "" -"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " -"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " -"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " -"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " -"l'évaluation centralisée !" 
-#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:307 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:311 +#, fuzzy +msgid "**Deprecate Python 3.8 support**" +msgstr "**Créer le PR**" + +#: ../../source/ref-changelog.md:313 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -"Comme pour `start_server`, `start_simulation` accepte maintenant une " -"instance complète de `Server`. Cela permet aux utilisateurs de " -"personnaliser fortement l'exécution des expériences et ouvre la porte à " -"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " -"virtuel." 
-#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:315 +#, fuzzy msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:317 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -"De nombreux exemples de code ont reçu de petites ou même de grandes mises" -" à jour de maintenance" - -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" - -#: ../../source/ref-changelog.md:791 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch` (démarrage rapide)" - -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" - -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" - -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow` (en anglais)" - -#: ../../source/ref-changelog.md:797 +#: ../../source/ref-changelog.md:319 +#, fuzzy msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -"**Supprime l'exemple de simulation obsolète** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:799 +#: ../../source/ref-changelog.md:321 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." 
msgstr "" -"Supprime l'exemple obsolète `simulation` et renomme " -"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" -" au nom de `simulation_pytorch`" -#: ../../source/ref-changelog.md:801 +#: ../../source/ref-changelog.md:325 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:327 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -"Une mise à jour substantielle de la documentation corrige plusieurs " -"petits problèmes de rendu, rend les titres plus succincts pour améliorer " -"la navigation, supprime une bibliothèque obsolète, met à jour les " -"dépendances de la documentation, inclut le module `flwr.common` dans la " -"référence de l'API, inclut le support de la documentation basée sur le " -"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " -"nombre de détails plus petits !" 
-#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" -msgstr "**Mises à jour mineures**" - -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:329 +#, fuzzy msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -"Ajoute un chiffre rond pour ajuster et évaluer les messages du journal " -"([#1266](https://github.com/adap/flower/pull/1266))" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:331 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -"Ajouter une connexion gRPC sécurisée à l'exemple de code " -"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:333 +#, fuzzy msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:335 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -"Renomme les messages ProtoBuf pour améliorer la cohérence " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" +#: ../../source/ref-changelog.md:337 +#, fuzzy +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:343 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/ref-changelog.md:818 -#, fuzzy +#: ../../source/ref-changelog.md:347 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -"La première version préliminaire de Flower Baselines est arrivée ! Nous " -"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " -"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html). Avec cette première version préliminaire, nous invitons " -"également la communauté à [contribuer à leurs propres lignes de " -"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:349 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. 
" +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -"**SDK client C++ (aperçu) et exemple de code** " -"([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:351 +#, fuzzy msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " -"code de démarrage rapide qui démontre un client C++ simple utilisant le " -"SDK." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:353 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:355 +#, fuzzy msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -"Python 3.10 est la dernière version stable de Python et Python 3.11 " -"devrait sortir en octobre. 
Cette version de Flower ajoute une prise en " -"charge expérimentale pour les deux versions de Python." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:828 +#: ../../source/ref-changelog.md:357 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." msgstr "" -"**Agréger des mesures personnalisées grâce à des fonctions fournies par " -"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:359 +#, fuzzy msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -"Les stratégies intégrées prennent en charge deux nouveaux arguments, " -"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " -"permettent de passer des fonctions d'agrégation de métriques " -"personnalisées." +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:361 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
msgstr "" -"**Temps d'attente configurable par l'utilisateur** " -"([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:363 +#, fuzzy msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " -"valeur `float` en secondes), le serveur attendra *au moins* " -"`round_timeout` secondes avant de fermer la connexion." +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:365 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " -"l'évaluation centralisée dans toutes les stratégies intégrées** " -"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:367 +#, fuzzy msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -"Les stratégies intégrées peuvent maintenant effectuer une évaluation " -"fédérée (c'est-à-dire côté client) et une évaluation centralisée " -"(c'est-à-dire côté serveur) dans le même tour. 
L'évaluation fédérée peut " -"être désactivée en réglant `fraction_eval` sur `0.0`." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:369 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -"**Deux nouveaux tutoriels Jupyter Notebook** " -"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:371 +#, fuzzy msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " -"expliquent les fonctionnalités de base et intermédiaires de Flower :" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:844 +#: ../../source/ref-changelog.md:373 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." 
msgstr "" -"*Introduction à l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:846 +#: ../../source/ref-changelog.md:375 +#, fuzzy msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:848 +#: ../../source/ref-changelog.md:377 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" -" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/ref-changelog.md:850 +#: ../../source/ref-changelog.md:379 #, fuzzy msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." -msgstr "" -"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " -"momentum du serveur [Hsu et al., 2019]." - -#: ../../source/ref-changelog.md:852 -msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:854 +#: ../../source/ref-changelog.md:381 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " -"fleur avancés avec PyTorch." -#: ../../source/ref-changelog.md:856 +#: ../../source/ref-changelog.md:383 +#, fuzzy msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:858 +#: ../../source/ref-changelog.md:385 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " -"l'apprentissage fédéré avec JAX et Flower." -#: ../../source/ref-changelog.md:862 +#: ../../source/ref-changelog.md:387 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " -"initialisé dans `start_simulation` " -"([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:389 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:864 +#: ../../source/ref-changelog.md:391 #, fuzzy msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -"Nouvelle documentation pour [mettre en œuvre des " -"stratégies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:393 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
msgstr "" -"Nouveau thème de documentation adapté aux mobiles " -"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:395 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " 
+"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " -"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/ref-changelog.md:870 -msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -msgstr "" -"**Supprime la prise en charge obsolète de Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +#: ../../source/ref-changelog.md:401 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:407 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:411 +#, fuzzy msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -"**Supprimer les installations supplémentaires no-op dépréciées** " -"([#973](https://github.com/adap/flower/pull/973))" - -#: ../../source/ref-changelog.md:873 -msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" -msgstr "" -"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" -" ([#869](https://github.com/adap/flower/pull/869))" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:413 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " 
-"([#1107](https://github.com/adap/flower/pull/1107))" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:415 +#, fuzzy msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -"**Supprime la stratégie DefaultStrategy qui est obsolète** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:876 +#: ../../source/ref-changelog.md:417 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -"**Supprimer la prise en charge obsolète de la valeur de retour de la " -"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:419 +#, fuzzy msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -"**Supprime la prise en charge obsolète du passage des paramètres initiaux" -" en tant que ndarrays NumPy** " -"([#1142](https://github.com/adap/flower/pull/1142))" - -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:421 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." 
msgstr "" -"**Amélioration de la compatibilité du moteur de client virtuel avec " -"Jupyter Notebook / Google Colab** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:423 +#, fuzzy msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"Les simulations (utilisant le moteur de client virtuel via " -"`start_simulation`) fonctionnent maintenant plus facilement sur les " -"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " -"avec l'option `simulation` (`pip install 'flwr[simulation]'`)." +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:425 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -"**Nouvel exemple de code Jupyter Notebook** " -"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:427 +#, fuzzy msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -"Un nouvel exemple de code (`quickstart_simulation`) démontre des " -"simulations de Flower en utilisant le moteur de client virtuel via " -"Jupyter Notebook (y compris Google Colab)." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:891 +#: ../../source/ref-changelog.md:429 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -"**Propriétés du client (aperçu des fonctionnalités)** " -"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/ref-changelog.md:893 +#: ../../source/ref-changelog.md:431 +#, fuzzy msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"Les clients peuvent implémenter une nouvelle méthode `get_properties` " -"pour permettre aux stratégies côté serveur d'interroger les propriétés du" -" client." 
+"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:895 +#: ../../source/ref-changelog.md:433 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -"**Support expérimental d'Android avec TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-changelog.md:897 +#: ../../source/ref-changelog.md:435 +#, fuzzy msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" -" la fois agnostique au niveau du client et du cadre de travail. On peut " -"intégrer des plates-formes client arbitraires et avec cette version, " -"l'utilisation de Flower sur Android est devenue beaucoup plus facile." +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:899 +#: ../../source/ref-changelog.md:437 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " -"`FedAvgAndroid`. Le client Android et `FedAvgAndroid` sont encore " -"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " -"part entière et une implémentation unifiée de `FedAvg` intégrant la " -"nouvelle fonctionnalité de `FedAvgAndroid`." 
-#: ../../source/ref-changelog.md:901 +#: ../../source/ref-changelog.md:439 +#, fuzzy msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"**Rendre le temps de garde gRPC configurable par l'utilisateur et " -"diminuer le temps de garde par défaut** " -"([#1069](https://github.com/adap/flower/pull/1069))" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:903 +#: ../../source/ref-changelog.md:441 +#, fuzzy msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " -"compatibilité de Flower avec davantage d'environnements cloud (par " -"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " -"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " -"spécifiques." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:905 +#: ../../source/ref-changelog.md:443 +#, fuzzy msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " -"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:445 +#, fuzzy msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " -"différentiellement privé avec Opacus, PyTorch et Flower." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:447 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
msgstr "" -"**Nouvel exemple de code pour les Transformers à visage embrassant** " -"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:911 +#: ../../source/ref-changelog.md:449 +#, fuzzy msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"Un nouvel exemple de code (`quickstart_huggingface`) démontre " -"l'utilisation des transformateurs Hugging Face avec Flower." +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:913 -msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:915 -msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." -msgstr "" -"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" -" MLCube avec Flower." 
+#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +#, fuzzy +msgid "**Update Flower Baselines**" +msgstr "Demande pour une nouvelle Flower Baseline" -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:455 +#, fuzzy msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:919 -msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +#: ../../source/ref-changelog.md:456 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -"SSL permet d'établir des connexions cryptées et sécurisées entre les " -"clients et les serveurs. Cette version met en open-source " -"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " -"communication cryptés accessibles à tous les utilisateurs de Flower." +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:921 -msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +#: ../../source/ref-changelog.md:457 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:923 -msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +#: ../../source/ref-changelog.md:458 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:459 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:460 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." 
+"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:462 +#, fuzzy msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:464 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -"`start_simulation` peut maintenant être appelé avec une liste " -"d'identifiants de clients (`clients_ids`, type : `List[str]`). 
Ces " -"identifiants seront passés à `client_fn` chaque fois qu'un client doit " -"être initialisé, ce qui peut faciliter le chargement de partitions de " -"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:466 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -"Mettre à jour le calcul de `num_examples` dans les exemples de code " -"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-changelog.md:932 +#: ../../source/ref-changelog.md:468 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"Exposer la version de Flower à travers `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:470 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " 
+"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -"`start_server` dans `app.py` renvoie maintenant un objet `History` " -"contenant les métriques de l'entraînement " -"([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-changelog.md:934 +#: ../../source/ref-changelog.md:474 +#, fuzzy msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:476 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -"Augmente le temps de sommeil après le démarrage du serveur à trois " -"secondes dans tous les exemples de code " -"([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/ref-changelog.md:936 +#: ../../source/ref-changelog.md:478 +#, fuzzy msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -"Ajout d'une nouvelle section FAQ à la documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:937 +#: ../../source/ref-changelog.md:480 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." 
msgstr "" -"Et bien d'autres changements sous le capot, des mises à jour de la " -"bibliothèque, des modifications de la documentation et des améliorations " -"de l'outillage !" -#: ../../source/ref-changelog.md:941 +#: ../../source/ref-changelog.md:482 +#, fuzzy msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " -"release build** ([#869](https://github.com/adap/flower/pull/869))" +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:484 +#, fuzzy msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " -"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " -"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " -"supprimés dans une prochaine version." - -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:949 +#: ../../source/ref-changelog.md:486 +#, fuzzy msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -"**Moteur expérimental de client virtuel** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:951 +#: ../../source/ref-changelog.md:488 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." 
msgstr "" -"L'un des objectifs de Flower est de permettre la recherche à grande " -"échelle. Cette version donne un premier aperçu (expérimental) d'une " -"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " -"client virtuel. Les clients virtuels permettent des simulations qui " -"s'étendent à un (très) grand nombre de clients sur une seule machine ou " -"une grappe de calcul. La façon la plus simple de tester la nouvelle " -"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" -" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:490 +#, fuzzy msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -"La fonction est encore expérimentale, il n'y a donc aucune garantie de " -"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " -"prime time et s'accompagne de quelques mises en garde connues. Cependant," -" les personnes curieuses sont encouragées à l'essayer et à faire part de " -"leurs réflexions." +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:955 +#: ../../source/ref-changelog.md:492 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:957 +#: ../../source/ref-changelog.md:494 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" + +#: ../../source/ref-changelog.md:500 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:504 +#, fuzzy msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. 
" -"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:960 +#: ../../source/ref-changelog.md:506 +#, fuzzy msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -"**Nouvel exemple de code PyTorch Lightning** " -"([#617](https://github.com/adap/flower/pull/617))" +"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:962 +#: ../../source/ref-changelog.md:508 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -"**Nouvel exemple de code d'autocodage variationnel** " -"([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:510 +#, fuzzy msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:512 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." 
msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:514 +#, fuzzy msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -"Amélioration de l'exemple de code TensorFlow avancé " -"([#769](https://github.com/adap/flower/pull/769))" +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:971 +#: ../../source/ref-changelog.md:516 +#, fuzzy msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -"Avertissement lorsque `min_available_clients` est mal configuré " -"([#830](https://github.com/adap/flower/pull/830))" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:518 +#, fuzzy msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:973 -msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -"Amélioration du message d'erreur dans `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:522 +#, fuzzy msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"Exemple de code de démarrage rapide PyTorch amélioré " -"([#852](https://github.com/adap/flower/pull/852))" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:978 -msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." 
msgstr "" -"**Désactivé l'évaluation finale distribuée** " -"([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:526 +#, fuzzy msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"Le comportement précédent consistait à effectuer un dernier tour " -"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " -"souvent pas nécessaire (par exemple, lors de l'utilisation de " -"l'évaluation côté serveur). Le comportement précédent peut être activé en" -" passant `force_final_distributed_eval=True` à `start_server`." +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:528 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:984 +#: ../../source/ref-changelog.md:530 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " -"refléter la notation donnée dans l'article original (q-FFL est l'objectif" -" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " -"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " -"des raisons de compatibilité (elle sera supprimée dans une prochaine " -"version)." 
-#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:532 +#, fuzzy msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:534 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " -"basé sur le moteur expérimental du client virtuel, qui deviendra la " -"nouvelle méthode par défaut pour effectuer la plupart des types de " -"simulations à grande échelle dans Flower. L'exemple existant a été " -"conservé à des fins de référence, mais il pourrait être supprimé à " -"l'avenir." -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" - -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:536 +#, fuzzy msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" "**Nouvelles stratégies intégrées** " -"([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" -msgstr "(résumé) FedOpt" - -#: ../../source/ref-changelog.md:999 -msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" -msgstr "" -"**Métriques personnalisées pour le serveur et les stratégies** " -"([#717](https://github.com/adap/flower/pull/717))" +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:538 +#, fuzzy msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." 
+"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -"Le serveur Flower est maintenant totalement agnostique, toutes les " -"instances restantes de métriques spécifiques à une tâche (telles que " -"`accuracy`) ont été remplacées par des dictionnaires de métriques " -"personnalisées. Flower 0.15 a introduit la possibilité de passer un " -"dictionnaire contenant des métriques personnalisées du client au serveur." -" À partir de cette version, les métriques personnalisées remplacent les " -"métriques spécifiques à une tâche sur le serveur." +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:1003 +#: ../../source/ref-changelog.md:540 #, fuzzy msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -"Les dictionnaires de métriques personnalisés sont maintenant utilisés " -"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " -"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " -"permettent aux fonctions d'évaluation passées aux stratégies intégrées " -"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " -"stratégies peuvent même renvoyer des dictionnaires de métriques " -"*agrégées* pour que le serveur puisse en garder la trace." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 #, fuzzy msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"Les implémentations de Stratey doivent migrer leurs méthodes " -"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " -"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " -"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " -"`return loss, {\"accuracy\" : accuracy}`." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:1007 +#: ../../source/ref-changelog.md:548 +#, fuzzy msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." 
+"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " -"pris en charge), la compatibilité sera supprimée dans une prochaine " -"version." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:1009 +#: ../../source/ref-changelog.md:550 +#, fuzzy msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -"**Avertissements de migration pour les fonctionnalités obsolètes** " -"([#690](https://github.com/adap/flower/pull/690))" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:1011 +#: ../../source/ref-changelog.md:552 +#, fuzzy msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"Les versions antérieures de Flower ont souvent été migrées vers de " -"nouvelles API, tout en maintenant la compatibilité avec les anciennes " -"API. Cette version introduit des messages d'avertissement détaillés si " -"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " -"d'avertissement fournissent souvent des détails sur la façon de migrer " -"vers des API plus récentes, facilitant ainsi la transition d'une version " -"à l'autre." 
+"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:1013 +#: ../../source/ref-changelog.md:554 +#, fuzzy msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"Amélioration des docs et des docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" -msgstr "Exemple et documentation MXNet" +#: ../../source/ref-changelog.md:556 +#, fuzzy +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:1017 -msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +#: ../../source/ref-changelog.md:558 +#, fuzzy +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" -" fédération ([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:1021 -msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +#: ../../source/ref-changelog.md:560 +#, fuzzy +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -"**Serveur agnostique de sérialisation** " -"([#721](https://github.com/adap/flower/pull/721))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1023 -msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +#: ../../source/ref-changelog.md:562 +#, fuzzy +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -"Le serveur Flower est désormais totalement agnostique en matière de " -"sérialisation. L'utilisation antérieure de la classe `Weights` (qui " -"représente les paramètres sous forme de tableaux NumPy désérialisés) a " -"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). 
" -"Les objets `Parameters` sont totalement agnostiques en matière de " -"sérialisation et représentent les paramètres sous forme de tableaux " -"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " -"d'octets doivent être interprétés (par exemple, pour la " -"sérialisation/désérialisation)." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1025 -msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." -msgstr "" -"Les stratégies intégrées mettent en œuvre cette approche en gérant en " -"interne la sérialisation et la désérialisation de `Weights`. Les " -"implémentations de stratégies personnalisées ou tierces doivent être " -"mises à jour avec les définitions de méthodes de stratégie légèrement " -"modifiées. Les auteurs de stratégies peuvent consulter le PR " -"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " -"stratégies peuvent facilement migrer vers le nouveau format." +#: ../../source/ref-changelog.md:564 +#, fuzzy +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-changelog.md:1027 +#: ../../source/ref-changelog.md:566 +#, fuzzy +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:568 +#, fuzzy +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:570 +#, fuzzy msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -"Déclassé `flwr.server.Server.evaluate`, utiliser " -"`flwr.server.Server.evaluate_round` à la place " -"([#717](https://github.com/adap/flower/pull/717))" - -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:572 +#, fuzzy msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"**Initialisation des paramètres côté serveur** " -"([#658](https://github.com/adap/flower/pull/658))" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " 
+"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:574 +#, fuzzy msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"Les paramètres du modèle peuvent maintenant être initialisés côté " -"serveur. L'initialisation des paramètres côté serveur fonctionne via une " -"nouvelle méthode `Strategy` appelée `initialize_parameters`." +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:576 +#, fuzzy msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"Les stratégies intégrées prennent en charge un nouvel argument du " -"constructeur appelé `initial_parameters` pour définir les paramètres " -"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " -"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." 
+"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:578 +#, fuzzy msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " -"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " -"l'un des clients connectés ses paramètres et les utilisera comme " -"paramètres globaux initiaux)." +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:580 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " -"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 
(2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." +msgstr "" +"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " +"pour être énumérées ici." -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:586 +#, fuzzy msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " -"retour** ([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:588 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " -"associant les clés `str` aux valeurs des types suivants : `bool`, " -"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " -"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " -"du serveur !" -#: ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:590 +#, fuzzy msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -"Cette amélioration a également permis de rendre plus cohérents les types " -"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " -"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " -"d'exemples, et un dictionnaire contenant des valeurs arbitraires " -"spécifiques au problème comme la précision." 
+"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:592 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." -msgstr "" -"Au cas où tu te poserais la question : cette fonctionnalité est " -"compatible avec les projets existants, la valeur de retour supplémentaire" -" du dictionnaire est facultative. Le nouveau code doit cependant migrer " -"vers les nouveaux types de retour pour être compatible avec les " -"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " -"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " -"ci-dessous pour plus de détails." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." +msgstr "" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:594 +#, fuzzy +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.4.0 (2023-04-21)" + +#: ../../source/ref-changelog.md:600 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"*Exemple de code:* note les valeurs de retour du dictionnaire " -"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" -#: ../../source/ref-changelog.md:1089 +#: ../../source/ref-changelog.md:604 +#, fuzzy msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"**Généralisé** `config` **argument dans** `Client.fit` **et** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:1091 +#: ../../source/ref-changelog.md:606 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
msgstr "" -"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " -"signifie que les valeurs du dictionnaire devaient être des chaînes. La " -"nouvelle version généralise cela pour permettre les valeurs des types " -"suivants : `bool`, `bytes`, `float`, `int`, `str`." -#: ../../source/ref-changelog.md:1093 +#: ../../source/ref-changelog.md:608 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -"Cela signifie que l'on peut maintenant passer des valeurs presque " -"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " -"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " -"du côté client !" -#: ../../source/ref-changelog.md:1095 +#: ../../source/ref-changelog.md:610 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" -" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" +#: ../../source/ref-changelog.md:612 +msgid "" +"Much effort went into a completely restructured Flower docs experience. 
" +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." +msgstr "" -#: ../../source/ref-changelog.md:1116 +#: ../../source/ref-changelog.md:614 +#, fuzzy msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -"Nouvel exemple : PyTorch de centralisé à fédéré " -"([#549](https://github.com/adap/flower/pull/549))" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" -msgstr "Amélioration de la documentation" +#: ../../source/ref-changelog.md:616 +msgid "" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." +msgstr "" -#: ../../source/ref-changelog.md:1118 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:618 +#, fuzzy +msgid "" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -"Nouveau thème de documentation " -"([#551](https://github.com/adap/flower/pull/551))" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:620 +msgid "" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
+msgstr "" -#: ../../source/ref-changelog.md:1120 +#: ../../source/ref-changelog.md:622 +#, fuzzy msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -"Mise à jour de la documentation des exemples " -"([#549](https://github.com/adap/flower/pull/549))" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:624 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -"Suppression de la documentation obsolète " -"([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" -msgstr "Correction de bogues :" +#: ../../source/ref-changelog.md:626 +#, fuzzy +msgid "**Deprecate Python 3.7**" +msgstr "**Créer le PR**" -#: ../../source/ref-changelog.md:1125 +#: ../../source/ref-changelog.md:628 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " -"déconnexion des clients est maintenant gérée dans " -"`flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." 
-#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:630 +#, fuzzy +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" +msgstr "" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" -msgstr "Changements importants :" +#: ../../source/ref-changelog.md:632 +#, fuzzy +msgid "" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." +msgstr "" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:634 +#, fuzzy msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -"Ajout d'un exemple pour les périphériques embarqués " -"([#507](https://github.com/adap/flower/pull/507))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1132 +#: ../../source/ref-changelog.md:636 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:638 +#, fuzzy msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -"Déclassement du paquet `flwr_example` et migration des exemples dans le " -"répertoire de premier niveau `examples` " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" - -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" -msgstr "Changements incompatibles :" - -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:640 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. 
Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -"Renommé les méthodes de stratégie " -"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" -" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " -"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" -" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " -"quatre méthodes de Stratégie. Pour migrer, renommez les méthodes de " -"`Strategy` suivantes en conséquence :" - -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" - -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" - -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" - -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" -#: ../../source/ref-changelog.md:1147 +#: ../../source/ref-changelog.md:642 +#, fuzzy msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"Déclassé `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " -"`FedAvg` à la place." +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:1148 +#: ../../source/ref-changelog.md:644 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -"Exemples simplifiés et lignes de base " -"([#484](https://github.com/adap/flower/pull/484))." -#: ../../source/ref-changelog.md:1149 +#: ../../source/ref-changelog.md:646 +#, fuzzy msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " -"de stratégie ([#483](https://github.com/adap/flower/pull/483))." +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:1150 +#: ../../source/ref-changelog.md:648 +#, fuzzy msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." 
+"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:650 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -"Amélioration des docstrings `Stratégie` " -"([#470](https://github.com/adap/flower/pull/470))." -#: ../../source/ref-example-projects.rst:2 +#: ../../source/ref-changelog.md:652 #, fuzzy -msgid "Example projects" -msgstr "Exemples de PyTorch" - -#: ../../source/ref-example-projects.rst:4 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " -"montrent comment Flower peut être utilisé pour fédérer différents types " -"de pipelines d'apprentissage automatique existants, qui s'appuient " -"généralement sur des frameworks d'apprentissage automatique populaires " -"tels que `PyTorch `_ ou `TensorFlow " -"`_." +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-changelog.md:654 #, fuzzy msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" -msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." - -#: ../../source/ref-example-projects.rst:14 -msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " -"d'images CIFAR-10 avec MobileNetV2 :" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-example-projects.rst:17 -#, fuzzy +#: ../../source/ref-changelog.md:656 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." 
msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-changelog.md:658 #, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" -msgstr "" -"`Quickstart TensorFlow (Tutorial) `_" - -#: ../../source/ref-example-projects.rst:19 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -"`Quickstart TensorFlow (Blog Post) `_" - -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" -msgstr "Démarrage rapide de PyTorch" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:660 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -"L'exemple de démarrage rapide PyTorch montre la classification d'images " -"CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:662 #, fuzzy msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -"`Quickstart PyTorch (Code) " -"`_" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-changelog.md:664 #, fuzzy -msgid ":doc:`Quickstart PyTorch (Tutorial) `" -msgstr "" -"`Quickstart PyTorch (Tutorial) `_" - -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch : De la centralisation à la fédération" +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-changelog.md:666 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" -" l'aide de Flower :" -#: ../../source/ref-example-projects.rst:37 -#, fuzzy +#: ../../source/ref-changelog.md:668 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. 
A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -"`PyTorch : De la centralisation à la fédération (Code) " -"`_" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:670 #, fuzzy msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"`PyTorch : De la centralisation à la fédération (Tutoriel) " -"`_" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:684 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -"Cet exemple montre comment Flower peut être utilisé pour construire un " -"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " -"Jetson :" - -#: ../../source/ref-example-projects.rst:46 -#, fuzzy -msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" -msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " -"`_" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:688 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " -"`_" +"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " +"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:690 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "" -"Cette page rassemble les réponses aux questions les plus fréquemment " -"posées sur l'apprentissage fédéré avec Flower." - -#: ../../source/ref-faq.rst -#, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " -"Juptyter / Google Colab ?" +"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " +"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" et un [exemple de code] " +"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " +"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " +"XGBoost." -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:692 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -"Oui, c'est possible ! Flower est même livré avec quelques optimisations " -"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " -"démarrage rapide :" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-faq.rst:10 -#, fuzzy +#: ../../source/ref-changelog.md:694 msgid "" -"`Flower simulation PyTorch " -"`_" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. 
We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" +"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " +"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous " +"disposons désormais d'un SDK swift iOS présent sous " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" qui facilitera grandement le processus de création d'applications. Pour " +"présenter son utilisation, l'[exemple " +"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " +"été mis à jour !" -#: ../../source/ref-faq.rst:11 -#, fuzzy +#: ../../source/ref-changelog.md:696 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" -msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " -"sur un Raspberry Pi ?" +"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" +" \"** ([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:698 #, fuzzy msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." -msgstr "" -"Trouve le `blog post about federated learning on embedded device ici " -"`_" -" et l'exemple de code GitHub correspondant " -"`_." - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " -"sur les appareils Android ?" +"Un nouveau [tutoriel d'entrée de gamme] " +"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " +"documentation explique les bases de l'apprentissage fédéré. Il permet à " +"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" +" voyage avec Flower. Fais-le suivre à tous ceux qui s'intéressent à " +"l'apprentissage fédéré !" -#: ../../source/ref-faq.rst:19 -#, fuzzy +#: ../../source/ref-changelog.md:700 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -"Oui. Jetez un coup d'œil à notre `blog post " -"`_ ou consultez l'`exemple de code Android sur GitHub " -"`_." 
+"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:702 msgid "" -"`Android Kotlin example `_" -msgstr "" - -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" -msgstr "" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" -" ?" +"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " +"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " +"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " +"qui vise à rendre la convergence plus robuste dans des contextes " +"hétérogènes." -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:704 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " -"environnement blockchain est disponible ici :" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:706 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -"`Flower meets Nevermined GitHub Repository `_." +"Cette nouvelle ligne de base reproduit une expérience évaluant les " +"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" +" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " +"2018)] (https://arxiv.org/abs/1812.01097)." -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:708 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -"`Flower rencontre Nevermined vidéo YouTube " -"`_." 
+"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-faq.rst:30 -#, fuzzy +#: ../../source/ref-changelog.md:710 msgid "" -"`Flower meets KOSMoS `_." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -"`Flower rencontre KOSMoS `_." +"Une nouvelle API REST a été introduite comme alternative à la pile de " +"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " +"prend en charge que les clients anonymes." -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:712 msgid "" -"`Flower meets Talan blog post `_ ." +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -"`Flower meets Talan blog post `_ ." +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:714 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." -msgstr "" -"`Flower rencontre Talan Dépôt GitHub " -"`_ ." - -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "Télémétrie" - -#: ../../source/ref-telemetry.md:3 -msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." -msgstr "" -"Le projet open-source Flower recueille des mesures d'utilisation " -"**anonymes** afin de prendre des décisions éclairées pour améliorer " -"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" -" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " -"confrontés." - -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." -msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** En restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des mesures d'utilisation anonymes." - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "Principes" - -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" -msgstr "" -"Nous suivons des principes stricts concernant la collecte de données " -"anonymes sur l'utilisation :" - -#: ../../source/ref-telemetry.md:11 -msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." 
+"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " -"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:716 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " -"contiennent aucune information personnelle identifiable (PII). Voir " -"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " -"mesures sont rapportées." +"L'API du pilote est encore une fonction expérimentale, mais cette version" +" introduit quelques améliorations majeures. L'une des principales " +"améliorations est l'introduction d'une base de données SQLite pour " +"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " +"autre amélioration est que les tâches (instructions ou résultats) qui ont" +" été livrées seront désormais supprimées, ce qui améliore " +"considérablement l'efficacité de la mémoire d'un serveur Flower " +"fonctionnant depuis longtemps." 
-#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:718 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " -"sont rapportées ; voir la section \"[Comment inspecter ce qui est " -"rapporté](#how-to-inspect-what-is-being-reported)\"" +"**Répare les problèmes de déversement liés à Ray pendant les " +"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:720 #, fuzzy msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." -msgstr "" -"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " -"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" -"to-contact-us)\" pour plus de détails." - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "Comment se désinscrire" - -#: ../../source/ref-telemetry.md:18 -msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" -msgstr "" -"Lorsque Flower démarre, il vérifie la présence d'une variable " -"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " -"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " -"supposant que tu démarres un serveur ou un client Flower, fais-le " -"simplement en faisant précéder ta commande de la façon suivante :" - -#: ../../source/ref-telemetry.md:24 -msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." -msgstr "" -"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " -"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " -"environnement) pour désactiver la télémétrie de la fleur de façon " -"permanente." - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "Mesures collectées" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "La télémétrie des fleurs recueille les métriques suivantes :" - -#: ../../source/ref-telemetry.md:30 -msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -"**Cela nous aide à décider si nous devons investir des efforts dans la " -"publication d'une version corrective pour une version plus ancienne de " -"Flower ou si nous devons plutôt utiliser la bande passante pour " -"développer de nouvelles fonctionnalités." 
+"Lors de l'exécution de longues simulations, `ray` déversait parfois " +"d'énormes quantités de données qui rendaient l'entraînement incapable de " +"continuer. ce problème est maintenant corrigé ! 🎉" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:722 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -"**Système d'exploitation.** Nous permet de répondre à des questions " -"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " -"Windows ?" +"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:724 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -"**Version de Python.** Connaître la version de Python nous aide, par " -"exemple, à décider si nous devons investir des efforts dans la prise en " -"charge des anciennes versions de Python ou cesser de les prendre en " -"charge et commencer à tirer parti des nouvelles fonctionnalités de " -"Python." +"TabNet est un cadre puissant et flexible pour former des modèles " +"d'apprentissage automatique sur des données tabulaires. Nous avons " +"maintenant un exemple fédéré utilisant Flower : [quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)." -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:726 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -"**Comprendre l'environnement matériel dans lequel Flower est utilisé " -"permet de décider si nous devrions, par exemple, faire plus d'efforts " -"pour prendre en charge les environnements à faibles ressources." +"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:728 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " -"démarre nous permet de comprendre à quel point certaines fonctionnalités " -"sont utilisées et de mieux établir les priorités en fonction de cela." +"Nous avons maintenant un guide de documentation pour aider les " +"utilisateurs à surveiller leurs performances pendant les simulations." 
-#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:730 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " -"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " -"nous permet de comprendre quels types d'appareils non seulement démarrent" -" les charges de travail Flower, mais aussi les terminent avec succès." +"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:732 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -"**Source.** La télémétrie de Flower essaie de stocker un ID de source " -"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " -"télémétrie est généré. L'ID de source est important pour identifier si un" -" problème est récurrent ou si un problème est déclenché par plusieurs " -"clusters fonctionnant simultanément (ce qui arrive souvent en " -"simulation). Par exemple, si un périphérique exécute plusieurs charges de" -" travail en même temps, et que cela entraîne un problème, alors, afin de " -"reproduire le problème, plusieurs charges de travail doivent être " -"démarrées en même temps." +"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " +"les mesures d'entraînement, mais les versions précédentes " +"n'enregistraient pas les résultats dans l'objet `History`. c'est " +"désormais le cas !" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:734 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." 
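The changelog entry above notes that metrics aggregated by `fit_metrics_aggregation_fn` are now stored in the `History` object. As a minimal sketch of how such a function is usually wired into a built-in strategy (the metric name `train_loss` is only an illustrative assumption, not something defined by this changelog):

```python
from typing import Dict, List, Tuple

import flwr as fl


def fit_metrics_aggregation_fn(
    results: List[Tuple[int, Dict[str, float]]]
) -> Dict[str, float]:
    """Weighted-average a hypothetical `train_loss` metric reported by clients."""
    total_examples = sum(num_examples for num_examples, _ in results)
    weighted_loss = sum(
        num_examples * metrics["train_loss"] for num_examples, metrics in results
    )
    return {"train_loss": weighted_loss / total_examples}


# Pass the function to a strategy; the aggregated per-round values then appear
# in the `History` object returned by the server or simulation run.
strategy = fl.server.strategy.FedAvg(
    fit_metrics_aggregation_fn=fit_metrics_aggregation_fn
)
```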
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " -"souhaites que tous les événements enregistrés sous un identifiant de " -"source spécifique soient supprimés, tu peux envoyer une demande de " -"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. " -"Tous les événements liés à cet identifiant de source seront alors " -"définitivement supprimés." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/ada" -#: ../../source/ref-telemetry.md:46 -msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." -msgstr "" -"Nous ne collecterons aucune information personnelle identifiable. Si tu " -"penses que l'une des métriques collectées pourrait être utilisée à " -"mauvais escient de quelque manière que ce soit, merci de [nous " -"contacter](#commentnouscontacter). Nous mettrons à jour cette page pour " -"refléter toute modification des métriques collectées et nous publierons " -"les changements dans le journal des modifications (changelog)." +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:748 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -"Si tu penses que d'autres mesures nous seraient utiles pour mieux " -"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " -"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " -"la vie privée des utilisateurs, nous pourrons les ajouter." - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "Comment inspecter ce qui est rapporté" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/ref-telemetry.md:52 +#: ../../source/ref-changelog.md:752 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." 
+"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " -"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " -"informations de télémétrie rapportées en définissant la variable " -"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " -"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " -"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " -"sans envoyer de mesures." +"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " +"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:754 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -"L'inspecteur Flower telemetry sans envoyer de métriques d'utilisation " -"anonymes, utilise les deux variables d'environnement :" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "Comment nous contacter" +"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " +"qui peut être utilisé pour identifier la charge de travail à laquelle une" +" tâche appartient. Elle prend également en charge un nouveau `group_id` " +"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " +"en cours. Le `workload_id` et le `group_id` permettent tous deux aux " +"nœuds clients de décider s'ils veulent traiter une tâche ou non." -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:756 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " -"traitons les mesures d'utilisation anonymes, contacte-nous via " -"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " -"courriel (`telemetry@flower.ai`)." +"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " +"flotte soit configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:758 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." 
+"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" +"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " +"API) peut maintenant configurer l'adresse du serveur de Driver API (via " +"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " +"de son démarrage :" -#: ../../source/tutorial-quickstart-android.rst:5 -#, fuzzy -msgid "Quickstart Android" -msgstr "Démarrage rapide des Pandas" - -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:760 #, fuzzy msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" +"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" +"address \"0.0.0.0:8086\" ``" -#: ../../source/tutorial-quickstart-android.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." + +#: ../../source/ref-changelog.md:764 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:766 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" +"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " +"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " +"ici : [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)." -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "Démarrage rapide fastai" - -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/ref-changelog.md:768 +msgid "" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" +"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " +"dernières versions d'Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:770 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." 
+"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." +"L'exemple de code Android a reçu une mise à jour substantielle : le " +"projet est compatible avec Flower 1.0 et les versions ultérieures, " +"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " +"est mis à jour pour être compatible avec les outils Android les plus " +"récents." -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:772 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "Démarrage rapide 🤗 Transformateurs" - -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:774 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -"Construisons un système d'apprentissage fédéré à l'aide des " -"transformateurs Hugging Face et de Flower !" +"Cette " +"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" est presque identique à " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " +"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " +"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" +" rapport aux modèles globaux." -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:776 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " -"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " -"précisément, nous mettrons au point un modèle Transformer pré-entraîné " -"(distilBERT) pour la classification de séquences sur un ensemble de " -"données d'évaluations IMDB. L'objectif final est de détecter si " -"l'évaluation d'un film est positive ou négative." 
- -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "Dépendances" +"**Ajouter de nouvelles métriques aux événements de télémétrie** " +"([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:778 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -"Pour suivre ce tutoriel, tu devras installer les paquets suivants : " -":code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, et " -":code:`transformers`. Cela peut être fait en utilisant :code:`pip` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "Flux de travail standard pour le visage" - -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "Traitement des données" +"Une structure d'événements mise à jour permet, par exemple, de regrouper " +"des événements au sein d'une même charge de travail." -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:780 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -"Pour récupérer le jeu de données IMDB, nous utiliserons la bibliothèque " -":code:`datasets` de Hugging Face. Nous devons ensuite tokeniser les " -"données et créer des :code:`PyTorch` dataloaders, ce qui est fait dans la" -" fonction :code:`load_data` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "Former et tester le modèle" +"**Ajouter une nouvelle section de tutoriel sur les stratégies " +"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:782 +#, fuzzy msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -"Une fois que nous avons trouvé un moyen de créer notre trainloader et " -"notre testloader, nous pouvons nous occuper de l'entraînement et du test." 
-" C'est très similaire à n'importe quelle boucle d'entraînement ou de test" -" :code:`PyTorch` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "Créer le modèle lui-même" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" +" : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" +"-Strategy-PyTorch.ipynb)" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:784 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -"Pour créer le modèle lui-même, nous allons simplement charger le modèle " -"distillBERT pré-entraîné en utilisant le " -":code:`AutoModelForSequenceClassification` de Hugging Face :" +"**Ajouter une nouvelle section de tutoriel sur la sérialisation " +"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "Fédérer l'exemple" +#: ../../source/ref-changelog.md:786 +#, fuzzy +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" +msgstr "" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la sérialisation personnalisée : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" +"-Client-and-NumPyClient-PyTorch.ipynb)" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "Création du client IMDBC" +#: ../../source/ref-changelog.md:788 +msgid "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " 
+"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" +msgstr "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/ada" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:792 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -"Pour fédérer notre exemple à plusieurs clients, nous devons d'abord " -"écrire notre classe de client Flower (héritant de " -":code:`flwr.client.NumPyClient`). C'est très facile, car notre modèle est" -" un modèle :code:`PyTorch` standard :" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"As usual, the documentation has improved quite a bit. 
It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -"La fonction :code:`get_parameters` permet au serveur d'obtenir les " -"paramètres du client. Inversement, la fonction :code:`set_parameters` " -"permet au serveur d'envoyer ses paramètres au client. Enfin, la fonction " -":code:`fit` forme le modèle localement pour le client, et la fonction " -":code:`evaluate` teste le modèle localement et renvoie les mesures " -"correspondantes." +"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une " +"autre étape dans notre effort pour faire de la documentation de Flower la" +" meilleure documentation de tout projet. Reste à l'écoute et comme " +"toujours, n'hésite pas à nous faire part de tes commentaires !" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "Démarrer le serveur" +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:806 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -"Maintenant que nous avons un moyen d'instancier les clients, nous devons " -"créer notre serveur afin d'agréger les résultats. Avec Flower, cela peut " -"être fait très facilement en choisissant d'abord une stratégie (ici, nous" -" utilisons :code:`FedAvg`, qui définira les poids globaux comme la " -"moyenne des poids de tous les clients à chaque tour) et en utilisant " -"ensuite la fonction :code:`flwr.server.start_server` :" +"adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " +"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:810 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -"La fonction :code:`weighted_average` est là pour fournir un moyen " -"d'agréger les mesures réparties entre les clients (en gros, cela nous " -"permet d'afficher une belle moyenne de précision et de perte pour chaque " -"tour)." 
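The server-side steps described above (picking `FedAvg`, aggregating client metrics with a `weighted_average` helper, and calling `flwr.server.start_server`) condense into a sketch roughly like the following; the `accuracy` key is assumed from the surrounding text and the round count is arbitrary:

```python
import flwr as fl


def weighted_average(metrics):
    """Aggregate evaluation metrics, weighting each client by its example count."""
    accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics]
    examples = [num_examples for num_examples, _ in metrics]
    return {"accuracy": sum(accuracies) / sum(examples)}


strategy = fl.server.strategy.FedAvg(
    evaluate_metrics_aggregation_fn=weighted_average,
)

fl.server.start_server(
    server_address="0.0.0.0:8080",
    config=fl.server.ServerConfig(num_rounds=3),
    strategy=strategy,
)
```

On the client side, the instances mentioned above would typically connect with something like `fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=IMDBClient())`, where `IMDBClient` is the `NumPyClient` subclass sketched in the quickstart.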
- -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "Tout assembler" - -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "Nous pouvons maintenant démarrer des instances de clients en utilisant :" +"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/ref-changelog.md:812 msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "Et ils pourront se connecter au serveur et démarrer la formation fédérée." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" +msgstr "" +"Au cours des prochaines semaines, nous publierons un certain nombre de " +"nouvelles implémentations de référence utiles en particulier pour les " +"nouveaux venus en FL. Elles revisiteront généralement des articles bien " +"connus de la littérature, et seront adaptées à l'intégration dans votre " +"propre application ou à l'expérimentation, afin d'approfondir votre " +"connaissance de FL en général. La publication d'aujourd'hui est la " +"première de cette série. [Lire la " +"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" +"cnn/)" -#: ../../source/tutorial-quickstart-huggingface.rst:223 -#, fuzzy +#: ../../source/ref-changelog.md:814 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -"Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " -"l'exemple de code complet : " -"[https://github.com/adap/flower/tree/main/examples/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/ref-changelog.md:816 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -"Bien sûr, c'est un exemple très basique, et beaucoup de choses peuvent " -"être ajoutées ou modifiées, il s'agissait juste de montrer avec quelle " -"simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" -" Flower." +"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " +"jour pour améliorer la prise en charge des GPU. 
La mise à jour inclut " +"certaines des leçons durement apprises lors de la mise à l'échelle des " +"simulations dans des environnements de grappes de GPU. De nouveaux " +"paramètres par défaut rendent l'exécution des simulations basées sur les " +"GPU beaucoup plus robuste." -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:818 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -"Notez que dans cet exemple, nous avons utilisé :code:`PyTorch`, mais nous" -" aurions très bien pu utiliser :code:`TensorFlow`." +"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " +"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:820 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" +"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " +"toujours été faciles à utiliser sur les instances GPU. Nous les avons " +"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! " +"Découvre les carnets mis à jour ici :" -#: ../../source/tutorial-quickstart-ios.rst:5 +#: ../../source/ref-changelog.md:822 #, fuzzy -msgid "Quickstart iOS" -msgstr "Démarrage rapide XGBoost" +msgid "" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" +msgstr "" +"[Une introduction à l'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:823 #, fuzzy msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -"Dans ce tutoriel, nous allons apprendre, comment former un réseau " -"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." +"[Stratégies d'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/ref-changelog.md:824 #, fuzzy msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." 
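The simulation entries above mention the Ray-based Virtual Client Engine (`start_simulation`) and its improved GPU support. A sketch of how GPU resources are typically requested per virtual client through `client_resources`; the dummy client, the client count, and the fractional GPU share are placeholders, and the example assumes `flwr[simulation]` is installed and a GPU is available:

```python
import flwr as fl
import numpy as np


class DummyClient(fl.client.NumPyClient):
    """A stand-in client with a single fake parameter, just to show the wiring."""

    def get_parameters(self, config):
        return [np.zeros(1)]

    def fit(self, parameters, config):
        return parameters, 1, {}

    def evaluate(self, parameters, config):
        return 0.0, 1, {}


def client_fn(cid: str):
    return DummyClient()


history = fl.simulation.start_simulation(
    client_fn=client_fn,
    num_clients=10,
    client_resources={"num_cpus": 1, "num_gpus": 0.25},  # four clients share one GPU
    config=fl.server.ServerConfig(num_rounds=3),
)
```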
+"[Construire une stratégie] " +"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" +"PyTorch.html)" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:825 #, fuzzy msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." +"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" +"and-NumPyClient-PyTorch.html)" -#: ../../source/tutorial-quickstart-ios.rst:17 -#, fuzzy +#: ../../source/ref-changelog.md:827 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/tutorial-quickstart-ios.rst:21 -#, fuzzy +#: ../../source/ref-changelog.md:829 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" -msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"exécutant :" - -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" +"À la suite d'une [demande de commentaires] " +"(https://github.com/adap/flower/issues/1534) de la part de la communauté," +" le projet open-source Flower introduit la collecte optionnelle de " +"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " +"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " +"comment Flower est utilisé et quels sont les défis auxquels les " +"utilisateurs peuvent être confrontés." 
-#: ../../source/tutorial-quickstart-ios.rst:34
-#: ../../source/tutorial-quickstart-pytorch.rst:37
-#: ../../source/tutorial-quickstart-scikitlearn.rst:40
-#: ../../source/tutorial-quickstart-tensorflow.rst:29
-#: ../../source/tutorial-quickstart-xgboost.rst:55
-msgid "Flower Client"
-msgstr "Client de la fleur"
-
-#: ../../source/tutorial-quickstart-ios.rst:36
+#: ../../source/ref-changelog.md:831
+#, fuzzy
 msgid ""
-"Now that we have all our dependencies installed, let's run a simple "
-"distributed training using CoreML as our local training pipeline and "
-"MNIST as our dataset. For simplicity reasons we will use the complete "
-"Flower client with CoreML, that has been implemented and stored inside "
-"the Swift SDK. The client implementation can be seen below:"
+"**Flower is a friendly framework for collaborative AI and data science.**"
+" Staying true to this statement, Flower makes it easy to disable "
+"telemetry for users who do not want to share anonymous usage metrics. "
+"[Read more.](https://flower.ai/docs/telemetry.html)."
 msgstr ""
+"**Flower est un cadre convivial pour l'IA collaborative et la science des"
+" données.** Restant fidèle à cette déclaration, Flower permet de "
+"désactiver facilement la télémétrie pour les utilisateurs qui ne "
+"souhaitent pas partager des métriques d'utilisation anonymes. [Lire la "
+"suite.](https://flower.ai/docs/telemetry.html)."

-#: ../../source/tutorial-quickstart-ios.rst:72
+#: ../../source/ref-changelog.md:833
 msgid ""
-"Let's create a new application project in Xcode and add :code:`flwr` as a"
-" dependency in your project. For our application, we will store the logic"
-" of our app in :code:`FLiOSModel.swift` and the UI elements in "
-":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`"
-" in this quickstart. Please refer to the `full code example "
-"`_ to learn more "
-"about the app."
-msgstr ""
-
-#: ../../source/tutorial-quickstart-ios.rst:75
-msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:"
+"**Introduce (experimental) Driver API** "
+"([#1520](https://github.com/adap/flower/pull/1520), "
+"[#1525](https://github.com/adap/flower/pull/1525), "
+"[#1545](https://github.com/adap/flower/pull/1545), "
+"[#1546](https://github.com/adap/flower/pull/1546), "
+"[#1550](https://github.com/adap/flower/pull/1550), "
+"[#1551](https://github.com/adap/flower/pull/1551), "
+"[#1567](https://github.com/adap/flower/pull/1567))"
 msgstr ""
+"**Introduire l'API Driver (expérimentale)** "
+"([#1520](https://github.com/adap/flower/pull/1520), "
+"[#1525](https://github.com/adap/flower/pull/1525), "
+"[#1545](https://github.com/adap/flower/pull/1545), "
+"[#1546](https://github.com/adap/flower/pull/1546), "
+"[#1550](https://github.com/adap/flower/pull/1550), "
+"[#1551](https://github.com/adap/flower/pull/1551), "
+"[#1567](https://github.com/adap/flower/pull/1567))"

-#: ../../source/tutorial-quickstart-ios.rst:83
 msgid ""
-"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel "
-"will be bundled inside the application during deployment to your iOS "
-"device. We need to pass the url to access mlmodel and run CoreML machine "
-"learning processes, it can be retrieved by calling the function "
-":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it "
-"into :code:`MLBatchProvider` object. The preprocessing is done inside "
-":code:`DataLoader.swift`."
+"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" +"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" +" permettra de créer des applications Federated Learning et Federated " +"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, " +"c'est beaucoup ! À l'avenir, l'API de pilote sera l'abstraction sur " +"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" +" peux commencer à construire ces choses dès maintenant, aussi." -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-changelog.md:837 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" +"L'API du pilote permet également un nouveau mode d'exécution dans lequel " +"le serveur s'exécute indéfiniment. Plusieurs charges de travail " +"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " +"leur exécution indépendamment du serveur. Ceci est particulièrement utile" +" pour les utilisateurs qui souhaitent déployer Flower en production." -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-changelog.md:839 msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" +"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " +"attendons tes commentaires avec impatience !" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:841 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" +"Remarque : *L'API du pilote est encore expérimentale et est susceptible " +"de changer de manière significative au fil du temps.*" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:843 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." 
+"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Serveur de Flower" - -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:845 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -"Pour les charges de travail simples, nous pouvons démarrer un serveur " -"Flower et laisser toutes les possibilités de configuration à leurs " -"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " -"Flower et démarre le serveur :" - -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" -msgstr "Entraîne le modèle, fédéré !" +"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " +"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:847 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout " -"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " -"généralement un serveur et plusieurs clients. Nous devons donc commencer " -"par démarrer le serveur :" +"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:849 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." 
+"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" +"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " +"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " +"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." -#: ../../source/tutorial-quickstart-ios.rst:156 -#, fuzzy +#: ../../source/ref-changelog.md:851 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " +"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:853 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" +"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" +" compatible avec la dernière version de Flower." -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "Démarrage rapide de JAX" - -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:855 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." 
+"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "Démarrage rapide des Pandas" - -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" - -#: ../../source/tutorial-quickstart-pandas.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:859 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:863 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." 
+"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" +"L'un des points forts est le nouveau [guide du premier contributeur] " +"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" +" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" + +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:873 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -"Dans ce tutoriel, nous allons apprendre à entraîner un réseau neuronal " -"convolutif sur CIFAR10 à l'aide de Flower et PyTorch." +"Nous aimerions **remercier tout particulièrement** tous les contributeurs" +" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 -#, fuzzy +#: ../../source/ref-changelog.md:875 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:879 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:881 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). 
The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." +"Le premier aperçu (expérimental) des wrappers enfichables de " +"confidentialité différentielle permet de configurer et d'utiliser " +"facilement la confidentialité différentielle (DP). Les wrappers DP " +"enfichables permettent une utilisation agnostique du cadre **et** de la " +"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " +"voir les documents de Flower, un nouvel explicatif va plus loin dans les " +"détails." -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/ref-changelog.md:883 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"exécutant :" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:885 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -"Puisque nous voulons utiliser PyTorch pour résoudre une tâche de vision " -"par ordinateur, allons-y et installons PyTorch et la bibliothèque " -"**torchvision** :" +"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " +"clients Flower peuvent être construits pour iOS. L'exemple de code " +"contient à la fois des composants Flower iOS SDK qui peuvent être " +"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " +"sur CoreML." -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:887 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -"Maintenant que nous avons installé toutes nos dépendances, lançons une " -"formation distribuée simple avec deux clients et un serveur. Notre " -"procédure de formation et l'architecture de notre réseau sont basées sur " -"`Deep Learning with PyTorch " -"`_ de" -" PyTorch." 
+"**Nouvelle stratégie de FedMedian** " +"([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:889 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" - -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "En outre, nous définissons l'attribution des appareils dans PyTorch avec :" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/ref-changelog.md:891 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -"Nous utilisons PyTorch pour charger CIFAR10, un ensemble de données de " -"classification d'images colorées populaire pour l'apprentissage " -"automatique. Le :code:`DataLoader()` de PyTorch télécharge les données " -"d'entraînement et de test qui sont ensuite normalisées." +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/ref-changelog.md:893 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -"Définis la perte et l'optimiseur avec PyTorch L'entraînement de " -"l'ensemble de données se fait en bouclant sur l'ensemble de données, en " -"mesurant la perte correspondante et en l'optimisant." +"Toutes les exceptions `Client` qui se produisent dans le VCE sont " +"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" +" `Stratégie` configurée (via l'argument `failures`)." -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/ref-changelog.md:895 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -"Définis ensuite la validation du réseau d'apprentissage automatique. Nous" -" passons en boucle sur l'ensemble de test et mesurons la perte et la " -"précision de l'ensemble de test." +"**Améliorer le moteur du client virtuel** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/ref-changelog.md:897 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." 
+"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -"Après avoir défini l'entraînement et le test d'un modèle d'apprentissage " -"automatique PyTorch, nous utilisons les fonctions pour les clients " -"Flower." +"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " +"dictionnaire `client_resources` a été remplacé par `float` pour permettre" +" l'allocation de fractions de ressources." -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/ref-changelog.md:899 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -"Les clients de Flower utiliseront un CNN simple adapté de \"PyTorch : A " -"60 Minute Blitz\" :" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/ref-changelog.md:901 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -"Après avoir chargé l'ensemble des données avec :code:`load_data()`, nous " -"définissons l'interface Flower." +"Le moteur de client virtuel prend désormais en charge les méthodes " +"optionnelles `Client` (et `NumPyClient`)." -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-changelog.md:903 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour former " -"le réseau neuronal que nous avons défini plus tôt)." +"**Fournir des informations de type aux paquets en utilisant** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:905 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise PyTorch. Mettre en œuvre :code:`NumPyClient` signifie" -" généralement définir les méthodes suivantes (:code:`set_parameters` est " -"cependant facultatif) :" - -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr ":code:`set_parameters` (optionnel)" +"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " +"indiquant que le paquet est typé. Cela permet de prendre en charge le " +"typage pour les projets ou les paquets qui utilisent `flwr` en leur " +"permettant d'améliorer leur code à l'aide de vérificateurs de types " +"statiques comme `mypy`." -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:907 msgid "" -"update the local model weights with the parameters received from the " -"server" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -"mettre à jour les poids du modèle local avec les paramètres reçus du " -"serveur" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "fixe les poids du modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "entraîne le modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "recevoir les poids du modèle local mis à jour" - -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "teste le modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "qui peut être mis en œuvre de la manière suivante :" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:909 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`CifarClient` et ajouter une ligne pour exécuter ce client :" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -#, fuzzy +#: ../../source/ref-changelog.md:911 msgid "" -"That's it for the client. 
We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." +"**Documentation mise à jour** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:913 msgid "" -"Once the server is running we can start the clients in different " -"terminals. 
Open a new terminal and start the first client:" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " -"dans différents terminaux. Ouvre un nouveau terminal et démarre le " -"premier client :" - -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" +"Il y a eu tellement de mises à jour de la documentation que cela n'a même" +" pas de sens de les énumérer individuellement." -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:915 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -"Chaque client aura son propre ensemble de données. Tu devrais maintenant " -"voir comment la formation se déroule dans le tout premier terminal (celui" -" qui a démarré le serveur) :" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/tutorial-quickstart-pytorch.rst:271 -#, fuzzy +#: ../../source/ref-changelog.md:917 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-pytorch`." +"La documentation a été restructurée pour faciliter la navigation. Ce " +"n'est que la première étape d'un effort plus important visant à faire de " +"la documentation de Flower la meilleure documentation de tous les projets" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:919 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" +"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "Démarrage rapide de PyTorch Lightning" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 -#, fuzzy +#: ../../source/ref-changelog.md:921 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. 
No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant PyTorch " -"Lightning et Flower !" +"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " +"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " +"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " +"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " +"il te suffit d'un simple clic." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:923 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ pour en savoir plus." +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/ref-changelog.md:925 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" +"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" +" parties couvrant les stratégies personnalisées (encore WIP) et la " +"distinction entre `Client` et `NumPyClient`. Les parties un et deux " +"existantes ont également été améliorées (beaucoup de petits changements " +"et de corrections)." -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "Démarrage rapide de scikit-learn" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 -msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." -msgstr "" -"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " -"régression logistique` sur MNIST en utilisant Flower et scikit-learn." +#: ../../source/ref-changelog.md:933 +msgid "Highlights" +msgstr "Points forts" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 -#, fuzzy -msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." -msgstr "" -"Il est recommandé de créer un environnement virtuel et de tout exécuter " -"dans ce `virtualenv `_." 
+#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "Moteur de client virtuel stable** (accessible via `start_simulation`)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" + +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" +msgstr "`get_parameters` configurable" + +#: ../../source/ref-changelog.md:938 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -"*Les clients* sont chargés de générer des mises à jour individuelles des " -"paramètres du modèle en fonction de leurs ensembles de données locales. " -"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " -"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " -"version améliorée du modèle à chaque *client*. Un cycle complet de mises " -"à jour des paramètres s'appelle un *round*." +"Des tonnes de petits nettoyages d'API résultant en une expérience plus " +"cohérente pour les développeurs" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:942 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -"Maintenant que nous avons une idée approximative de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"lançant :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -#, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" +"Nous tenons à remercier **particulièrement** tous les contributeurs qui " +"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors)) :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/ref-changelog.md:944 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" -msgstr "" -"Maintenant que toutes nos dépendances sont installées, exécutons une " -"formation distribuée simple avec deux clients et un serveur. 
Cependant, " -"avant de configurer le client et le serveur, nous allons définir toutes " -"les fonctionnalités dont nous avons besoin pour notre configuration " -"d'apprentissage fédéré dans :code:`utils.py`. Le :code:`utils.py` " -"contient différentes fonctions définissant toutes les bases de " -"l'apprentissage automatique :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr ":code:`get_model_parameters()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
msgstr "" -"Renvoie les paramètres d'un modèle de régression logistique " -":code:`sklearn`" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" -msgstr ":code:`set_model_params()`" +#: ../../source/ref-changelog.md:948 +msgid "" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "" +"**Tous les arguments doivent être passés comme des arguments de mot-clé**" +" ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +#: ../../source/ref-changelog.md:950 #, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" -msgstr ":code:`set_initial_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -"Tu peux consulter :code:`utils.py` `ici " -"`_ pour plus de détails. Les fonctions prédéfinies sont " -"utilisées dans :code:`client.py` et importées. :code:`client.py` " -"nécessite également d'importer plusieurs paquets tels que Flower et " -"scikit-learn :" +"Le code qui utilise des arguments positionnels (par exemple, " +"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" +"clé pour chaque argument positionnel (par exemple, " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/ref-changelog.md:952 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" +"**Introduire l'objet de configuration** `ServerConfig` **dans** " +"`start_server` **et** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:954 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -"Ensuite, le modèle de régression logistique est défini et initialisé avec" -" :code:`utils.set_initial_params()`." +"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " +"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " +"attendent maintenant un objet de configuration de type " +"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que " +"le dict de configuration précédent, mais il rend l'écriture de code " +"sécurisé plus facile et les valeurs des paramètres par défaut plus " +"transparentes." -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:956 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" -" la régression logistique que nous avons définie plus tôt)." +"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:958 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise scikit-learn. Mettre en œuvre :code:`NumPyClient` " -"signifie généralement définir les méthodes suivantes " -"(:code:`set_parameters` est cependant facultatif) :" +"Les paramètres de stratégie intégrés suivants ont été renommés pour " +"améliorer la lisibilité et la cohérence avec d'autres API :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "est directement importé avec :code:`utils.set_model_params()`" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 -msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" -msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 -#, fuzzy +#: ../../source/ref-changelog.md:964 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"0.0.0:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons " -":code:`\"0.0.0:8080\"`. Si nous exécutons une charge de travail " -"véritablement fédérée avec le serveur et les clients s'exécutant sur des " -"machines différentes, tout ce qui doit changer est :code:`server_address`" -" que nous transmettons au client." 
+"**Mettre à jour les arguments par défaut des stratégies intégrées** " +"([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:966 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " -"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" -" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" -"learn." +"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" +" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " +"les clients actuellement disponibles pour l'entraînement et l'évaluation." +" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " +"peuvent retrouver le comportement antérieur en initialisant la stratégie " +"de la manière suivante :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr ":code:`server.py`, importe Flower et démarre le serveur :" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -#, fuzzy +#: ../../source/ref-changelog.md:970 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Le nombre de tours d'apprentissage fédéré est défini dans " -":code:`fit_round()` et l'évaluation est définie dans " -":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " -"chaque tour d'apprentissage fédéré et te donne des informations sur la " -"perte et la précision." +"**Ajouter** `server_round` **à** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:972 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
+"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -"Le :code:`main` contient l'initialisation des paramètres côté serveur " -":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " -":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " -"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" -" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" -" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre." -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/ref-changelog.md:974 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " -"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " -"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" -" commencer par lancer le serveur :" +"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" +" ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:976 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" -"mnist`." +"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " +"maintenant trois paramètres : (1) le cycle actuel " +"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" +" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" +" (`config`)." -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/ref-changelog.md:978 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "Démarrage rapide de TensorFlow" - -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" 
+#: ../../source/ref-changelog.md:980
+msgid ""
+"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, "
+"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the "
+"current round of federated learning/evaluation as their first parameter. "
+"To improve readability and avoid confusion with *random*, this parameter "
+"has been renamed from `rnd` to `server_round`."
msgstr ""
-"Construisons un système d'apprentissage fédéré en moins de 20 lignes de "
-"code !"
-
-#: ../../source/tutorial-quickstart-tensorflow.rst:15
-msgid "Before Flower can be imported we have to install it:"
-msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :"
+"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, "
+"`configure_fit`, `aggregate_fit`, `configure_evaluate`, "
+"`aggregate_evaluate`) reçoivent le cycle actuel "
+"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer"
+" la lisibilité et éviter la confusion avec *random*, ce paramètre a été "
+"renommé de `rnd` à `server_round`."
-#: ../../source/tutorial-quickstart-tensorflow.rst:21
+#: ../../source/ref-changelog.md:982
msgid ""
-"Since we want to use the Keras API of TensorFlow (TF), we have to install"
-" TF as well:"
+"**Move** `flwr.dataset` **to** `flwr_baselines` "
+"([#1273](https://github.com/adap/flower/pull/1273))"
msgstr ""
-"Comme nous voulons utiliser l'API Keras de TensorFlow (TF), nous devons "
-"également installer TF :"
+"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` "
+"([#1273](https://github.com/adap/flower/pull/1273))"
-#: ../../source/tutorial-quickstart-tensorflow.rst:31
-msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:"
-msgstr ""
-"Ensuite, dans un fichier appelé :code:`client.py`, importe Flower et "
-"TensorFlow :"
+#: ../../source/ref-changelog.md:984
+msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines."
+msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines."
-#: ../../source/tutorial-quickstart-tensorflow.rst:38
+#: ../../source/ref-changelog.md:986
msgid ""
-"We use the Keras utilities of TF to load CIFAR10, a popular colored image"
-" classification dataset for machine learning. The call to "
-":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches "
-"it locally, and then returns the entire training and test set as NumPy "
-"ndarrays."
+"**Remove experimental strategies** "
+"([#1280](https://github.com/adap/flower/pull/1280))"
msgstr ""
-"Nous utilisons les utilitaires Keras de TF pour charger CIFAR10, un "
-"ensemble de données de classification d'images colorées populaire pour "
-"l'apprentissage automatique. L'appel à "
-":code:`tf.keras.datasets.cifar10.load_data()` télécharge CIFAR10, le met "
-"en cache localement, puis renvoie l'ensemble d'entraînement et de test "
-"sous forme de NumPy ndarrays."
+"**Supprimer les stratégies expérimentales** "
+"([#1280](https://github.com/adap/flower/pull/1280))"
-#: ../../source/tutorial-quickstart-tensorflow.rst:47
+#: ../../source/ref-changelog.md:988
msgid ""
-"Next, we need a model. For the purpose of this tutorial, we use "
-"MobilNetV2 with 10 output classes:"
+"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, "
+"`FedFSv1`)."
msgstr ""
-"Ensuite, nous avons besoin d'un modèle. Pour les besoins de ce tutoriel, "
-"nous utilisons MobilNetV2 avec 10 classes de sortie :"
+"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, "
+"`FedFSv0`, `FedFSv1`)."
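The `server_round` entries above all describe the same signature change in the underlying changelog. As a hedged illustration (not part of the translation catalogue itself; the loss and accuracy values are placeholders), a centralized `evaluate_fn` under the renamed API might look roughly like this:

```python
from typing import Dict, Optional, Tuple

import flwr as fl
from flwr.common import NDArrays, Scalar


def evaluate_fn(
    server_round: int,          # current round, previously named `rnd`
    parameters: NDArrays,       # global model parameters to evaluate
    config: Dict[str, Scalar],  # per-round evaluation configuration
) -> Optional[Tuple[float, Dict[str, Scalar]]]:
    # Placeholder evaluation: plug in a real model and test set here.
    loss, accuracy = 0.0, 0.0
    return loss, {"accuracy": accuracy}


strategy = fl.server.strategy.FedAvg(evaluate_fn=evaluate_fn)
```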
-#: ../../source/tutorial-quickstart-tensorflow.rst:60
+#: ../../source/ref-changelog.md:990
msgid ""
-"Flower provides a convenience class called :code:`NumPyClient` which "
-"makes it easier to implement the :code:`Client` interface when your "
-"workload uses Keras. The :code:`NumPyClient` interface defines three "
-"methods which can be implemented in the following way:"
+"**Rename** `Weights` **to** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
msgstr ""
-"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui "
-"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge"
-" de travail utilise Keras. L'interface :code:`NumPyClient` définit trois "
-"méthodes qui peuvent être mises en œuvre de la manière suivante :"
-
-#: ../../source/tutorial-quickstart-tensorflow.rst:135
-msgid "Each client will have its own dataset."
-msgstr "Chaque client aura son propre ensemble de données."
+"**Rename** `Weights` **to** `NDArrays` "
+"([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
-#: ../../source/tutorial-quickstart-tensorflow.rst:137
+#: ../../source/ref-changelog.md:992
msgid ""
-"You should now see how the training does in the very first terminal (the "
-"one that started the server):"
+"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better "
+"capture what this type is all about."
msgstr ""
-"Tu devrais maintenant voir comment la formation se déroule dans le tout "
-"premier terminal (celui qui a démarré le serveur) :"
+"`flwr.common.Weights` a été renommé en `flwr.common.NDArrays` pour mieux "
+"rendre compte de la nature de ce type."
-#: ../../source/tutorial-quickstart-tensorflow.rst:169
-#, fuzzy
msgid ""
-"Congratulations! You've successfully built and run your first federated "
-"learning system. The full `source code "
-"`_ for this can be found in :code:`examples"
-"/quickstart-tensorflow/client.py`."
+"**Remove antiquated** `force_final_distributed_eval` **from** "
+"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
msgstr ""
-"Félicitations ! Tu as réussi à construire et à faire fonctionner ton "
-"premier système d'apprentissage fédéré. Le `code source complet "
-"`_ pour cela se trouve dans :code:`examples"
-"/quickstart-tensorflow/client.py`."
+"**Supprimez l'ancien** `force_final_distributed_eval` **de** "
+"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
-#: ../../source/tutorial-quickstart-xgboost.rst:-1
+#: ../../source/ref-changelog.md:996
msgid ""
-"Check out this Federated Learning quickstart tutorial for using Flower "
-"with XGBoost to train classification models on trees."
+"The `start_server` parameter `force_final_distributed_eval` has long been"
+" a historic artefact; in this release it is finally gone for good."
msgstr ""
+"Le paramètre `start_server` `force_final_distributed_eval` a longtemps "
+"été un artefact historique, dans cette version il a finalement disparu "
+"pour de bon."
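To make the `ServerConfig` and sampling-fraction entries above easier to picture, here is a minimal sketch of the migrated server-side call; the address and the concrete values are illustrative only:

```python
import flwr as fl
from flwr.server import ServerConfig
from flwr.server.strategy import FedAvg

# Replaces the old config dict {"num_rounds": 3, "round_timeout": 600.0}
config = ServerConfig(num_rounds=3, round_timeout=600.0)

# Defaults are now fraction_fit=1.0 / fraction_evaluate=1.0; set them
# explicitly to keep the previous sampling behaviour.
strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)

fl.server.start_server(
    server_address="0.0.0.0:8080",  # illustrative address
    config=config,
    strategy=strategy,
)
```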
-#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "Démarrage rapide XGBoost" - -#: ../../source/tutorial-quickstart-xgboost.rst:14 -#, fuzzy -msgid "Federated XGBoost" -msgstr "Formation fédérée" - -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/ref-changelog.md:998 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:1000 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" +"La méthode `get_parameters` accepte maintenant un dictionnaire de " +"configuration, tout comme `get_properties`, `fit`, et `evaluate`." -#: ../../source/tutorial-quickstart-xgboost.rst:23 -#, fuzzy -msgid "Why federated XGBoost?" -msgstr "Qu'est-ce que l'apprentissage fédéré ?" - -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/ref-changelog.md:1002 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" +"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" +" `config` **paramètre** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:1004 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" +"La fonction `start_simulation` accepte maintenant un dictionnaire de " +"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" +" cohérence entre `start_simulation` et `start_server` et facilite la " +"transition entre les deux." -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:1008 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. 
We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/ref-changelog.md:1010 +msgid "" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" +"La version précédente de Flower a introduit la prise en charge " +"expérimentale de Python 3.10, cette version déclare la prise en charge de" +" Python 3.10 comme stable." -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/ref-changelog.md:1012 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" +"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " +"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/tutorial-quickstart-xgboost.rst:47 -#, fuzzy +#: ../../source/ref-changelog.md:1014 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" +msgstr "" +"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " +"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " +"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " +"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " +"l'évaluation centralisée !" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:1016 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" +"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-quickstart-xgboost.rst:60 -#, fuzzy +#: ../../source/ref-changelog.md:1018 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" -msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" - -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. 
This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" +"Comme pour `start_server`, `start_simulation` accepte maintenant une " +"instance complète de `Server`. Cela permet aux utilisateurs de " +"personnaliser fortement l'exécution des expériences et ouvre la porte à " +"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " +"virtuel." -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:1020 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:1022 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" +"De nombreux exemples de code ont reçu de petites ou même de grandes mises" +" à jour de maintenance" -#: ../../source/tutorial-quickstart-xgboost.rst:121 -msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." -msgstr "" +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" -#: ../../source/tutorial-quickstart-xgboost.rst:134 -msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" -msgstr "" +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." -msgstr "" +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch` (démarrage rapide)" -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." -msgstr "" +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" -msgstr "" +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" -#: ../../source/tutorial-quickstart-xgboost.rst:183 -msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." 
-msgstr "" +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow` (en anglais)" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:1031 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" +"**Supprime l'exemple de simulation obsolète** " +"([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:1033 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" +"Supprime l'exemple obsolète `simulation` et renomme " +"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" +" au nom de `simulation_pytorch`" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:1035 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" +"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:1037 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. 
From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" +"Une mise à jour substantielle de la documentation corrige plusieurs " +"petits problèmes de rendu, rend les titres plus succincts pour améliorer " +"la navigation, supprime une bibliothèque obsolète, met à jour les " +"dépendances de la documentation, inclut le module `flwr.common` dans la " +"référence de l'API, inclut le support de la documentation basée sur le " +"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " +"nombre de détails plus petits !" + +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" +msgstr "**Mises à jour mineures**" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:1041 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" +"Ajoute un chiffre rond pour ajuster et évaluer les messages du journal " +"([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:1042 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" +"Ajouter une connexion gRPC sécurisée à l'exemple de code " +"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/tutorial-quickstart-xgboost.rst:294 -#, fuzzy +#: ../../source/ref-changelog.md:1043 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-quickstart-xgboost.rst:300 -#, fuzzy +#: ../../source/ref-changelog.md:1044 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. 
The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." +"Renomme les messages ProtoBuf pour améliorer la cohérence " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-quickstart-xgboost.rst:311 -#, fuzzy +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" + +#: ../../source/ref-changelog.md:1050 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." +"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/ref-changelog.md:1052 #, fuzzy msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." 
msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés au MXNet :" +"La première version préliminaire de Flower Baselines est arrivée ! Nous " +"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " +"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html). Avec cette première version préliminaire, nous invitons " +"également la communauté à [contribuer à leurs propres lignes de " +"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/ref-changelog.md:1054 +msgid "" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" +"**SDK client C++ (aperçu) et exemple de code** " +"([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:1056 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" +"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " +"code de démarrage rapide qui démontre un client C++ simple utilisant le " +"SDK." -#: ../../source/tutorial-quickstart-xgboost.rst:342 -#, fuzzy -msgid "Then, we start the server:" -msgstr "Démarrer le serveur" - -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" +#: ../../source/ref-changelog.md:1058 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:1060 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" +"Python 3.10 est la dernière version stable de Python et Python 3.11 " +"devrait sortir en octobre. Cette version de Flower ajoute une prise en " +"charge expérimentale pour les deux versions de Python." -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:1062 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." 
-" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" +"**Agréger des mesures personnalisées grâce à des fonctions fournies par " +"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:1064 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" +"Les stratégies intégrées prennent en charge deux nouveaux arguments, " +"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " +"permettent de passer des fonctions d'agrégation de métriques " +"personnalisées." -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:1066 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" +"**Temps d'attente configurable par l'utilisateur** " +"([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:1068 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" +"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " +"valeur `float` en secondes), le serveur attendra *au moins* " +"`round_timeout` secondes avant de fermer la connexion." -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:1070 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." 
+"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" +"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " +"l'évaluation centralisée dans toutes les stratégies intégrées** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/tutorial-quickstart-xgboost.rst:590 -#, fuzzy +#: ../../source/ref-changelog.md:1072 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"Les stratégies intégrées peuvent maintenant effectuer une évaluation " +"fédérée (c'est-à-dire côté client) et une évaluation centralisée " +"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " +"être désactivée en réglant `fraction_eval` sur `0.0`." -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +#: ../../source/ref-changelog.md:1074 +msgid "" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" +"**Deux nouveaux tutoriels Jupyter Notebook** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/ref-changelog.md:1076 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" +"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " +"expliquent les fonctionnalités de base et intermédiaires de Flower :" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -#, fuzzy -msgid "Cyclic training" -msgstr "Formation centralisée" - -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/ref-changelog.md:1078 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." 
+"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" +"*Introduction à l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/ref-changelog.md:1080 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" +"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/ref-changelog.md:1082 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" +"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" +" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/ref-changelog.md:1084 +#, fuzzy msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" +"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " +"momentum du serveur [Hsu et al., 2019]." -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/ref-changelog.md:1086 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +#: ../../source/ref-changelog.md:1088 +msgid "" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" +"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " +"fleur avancés avec PyTorch." -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/ref-changelog.md:1090 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." 
+"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -#, fuzzy -msgid "Customised centralised/distributed evaluation" -msgstr "Évaluation centralisée" - -#: ../../source/tutorial-quickstart-xgboost.rst:792 +#: ../../source/ref-changelog.md:1092 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" +"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " +"l'apprentissage fédéré avec JAX et Flower." -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:1096 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" +"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " +"initialisé dans `start_simulation` " +"([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/tutorial-quickstart-xgboost.rst:827 +#: ../../source/ref-changelog.md:1097 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-quickstart-xgboost.rst:831 +#: ../../source/ref-changelog.md:1098 #, fuzzy -msgid "Flower simulation" -msgstr "Simulation de moniteur" - -#: ../../source/tutorial-quickstart-xgboost.rst:832 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." 
+"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" +"Nouvelle documentation pour [mettre en œuvre des " +"stratégies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" -#: ../../source/tutorial-quickstart-xgboost.rst:866 +#: ../../source/ref-changelog.md:1099 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" +"Nouveau thème de documentation adapté aux mobiles " +"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/tutorial-quickstart-xgboost.rst:921 +#: ../../source/ref-changelog.md:1100 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" +"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " +"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/ref-changelog.md:1104 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" +"**Supprime la prise en charge obsolète de Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/tutorial-quickstart-xgboost.rst:975 +#: ../../source/ref-changelog.md:1105 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/tutorial-quickstart-xgboost.rst:995 +#: ../../source/ref-changelog.md:1106 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" +#: ../../source/ref-changelog.md:1107 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" +"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" +" ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 +#: ../../source/ref-changelog.md:1108 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. 
Let's first see the sever side:" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" +"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#: ../../source/ref-changelog.md:1109 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" +"**Supprime la stratégie DefaultStrategy qui est obsolète** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" +#: ../../source/ref-changelog.md:1110 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" +"**Supprimer la prise en charge obsolète de la valeur de retour de la " +"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/ref-changelog.md:1111 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" +"**Supprime la prise en charge obsolète du passage des paramètres initiaux" +" en tant que ndarrays NumPy** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" -msgstr "" +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." 
+#: ../../source/ref-changelog.md:1117 +msgid "" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" +"**Amélioration de la compatibilité du moteur de client virtuel avec " +"Jupyter Notebook / Google Colab** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -#, fuzzy -msgid "Example commands" -msgstr "Exemples de PyTorch" - -#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#: ../../source/ref-changelog.md:1119 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" +"Les simulations (utilisant le moteur de client virtuel via " +"`start_simulation`) fonctionnent maintenant plus facilement sur les " +"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " +"avec l'option `simulation` (`pip install 'flwr[simulation]'`)." -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -#, fuzzy -msgid "Then, on each client terminal, we start the clients:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" - -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/ref-changelog.md:1121 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" +"**Nouvel exemple de code Jupyter Notebook** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -#, fuzzy +#: ../../source/ref-changelog.md:1123 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -#, fuzzy -msgid "Build a strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +"Un nouvel exemple de code (`quickstart_simulation`) démontre des " +"simulations de Flower en utilisant le moteur de client virtuel via " +"Jupyter Notebook (y compris Google Colab)." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:1125 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. 
In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" -" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__) " -"et nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et sur les clients " -"(`partie 2 `__)." +"**Propriétés du client (aperçu des fonctionnalités)** " +"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1127 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -"Dans ce carnet, nous allons continuer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit précédemment en créant " -"une version personnalisée de FedAvg (encore une fois, en utilisant " -"`Flower `__ et `PyTorch `__)." +"Les clients peuvent implémenter une nouvelle méthode `get_properties` " +"pour permettre aux stratégies côté serveur d'interroger les propriétés du" +" client." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:1129 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" 
- -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "Préparation" +"**Support expérimental d'Android avec TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1131 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -"Avant de commencer le code proprement dit, assurons-nous que nous " -"disposons de tout ce dont nous avons besoin." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "Installation des dépendances" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "Tout d'abord, nous installons les paquets nécessaires :" +"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" +" la fois agnostique au niveau du client et du cadre de travail. On peut " +"intégrer des plates-formes client arbitraires et avec cette version, " +"l'utilisation de Flower sur Android est devenue beaucoup plus facile." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:1133 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -"Maintenant que toutes les dépendances sont installées, nous pouvons " -"importer tout ce dont nous avons besoin pour ce tutoriel :" +"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " +"`FedAvgAndroid`. Le client Android et `FedAvgAndroid` sont encore " +"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " +"part entière et une implémentation unifiée de `FedAvg` intégrant la " +"nouvelle fonctionnalité de `FedAvgAndroid`." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:1135 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "Chargement des données" +"**Rendre le temps de garde gRPC configurable par l'utilisateur et " +"diminuer le temps de garde par défaut** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:1137 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation), et enveloppons le tout dans " -"leur propre ``DataLoader``. Nous introduisons un nouveau paramètre " -"``num_clients`` qui nous permet d'appeler ``load_datasets`` avec " -"différents nombres de clients." 
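The removed tutorial string above refers to a ``load_datasets`` helper that partitions CIFAR-10 into ``num_clients`` smaller datasets, each with its own train/validation ``DataLoader``. The notebook's own implementation is not part of this catalogue; the following is only a rough sketch of such a helper, assuming ``torch``/``torchvision`` are available and that ``num_clients`` divides the training set evenly (as in the ten-client tutorial setup).

```python
from typing import List, Tuple

import torch
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms


def load_datasets(num_clients: int) -> Tuple[List[DataLoader], List[DataLoader], DataLoader]:
    """Partition CIFAR-10 into `num_clients` train/val splits plus one shared test loader."""
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )
    trainset = datasets.CIFAR10("./data", train=True, download=True, transform=transform)
    testset = datasets.CIFAR10("./data", train=False, download=True, transform=transform)

    # One equally sized IID partition per client (assumes an even split)
    partition_size = len(trainset) // num_clients
    partitions = random_split(
        trainset, [partition_size] * num_clients, torch.Generator().manual_seed(42)
    )

    trainloaders, valloaders = [], []
    for partition in partitions:
        # 90% train / 10% validation inside each client partition
        val_size = len(partition) // 10
        train_split, val_split = random_split(
            partition, [len(partition) - val_size, val_size], torch.Generator().manual_seed(42)
        )
        trainloaders.append(DataLoader(train_split, batch_size=32, shuffle=True))
        valloaders.append(DataLoader(val_split, batch_size=32))
    return trainloaders, valloaders, DataLoader(testset, batch_size=32)
```

Calling ``load_datasets(num_clients=10)`` then yields one ``DataLoader`` pair per simulated client plus a shared test loader, which is all the surrounding tutorial text relies on.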
- -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "Formation/évaluation du modèle" +"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " +"compatibilité de Flower avec davantage d'environnements cloud (par " +"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " +"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " +"spécifiques." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:1139 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -"Continuons avec la définition habituelle du modèle (y compris " -"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " -"et de test :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "Client de Flower" +"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " +"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:1141 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" -"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " -"méthodes ``get_parameters``, ``fit`` et ``evaluate``. Ici, nous " -"transmettons également le ``cid`` au client et l'utilisons pour consigner" -" des détails supplémentaires :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " +"différentiellement privé avec Opacus, PyTorch et Flower." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1143 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. 
We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " -"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " -"hyperparamètres) à l'optimiseur d'une fraction des clients. Nous " -"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " -"changerons ensuite le dictionnaire de configuration (l'un des attributs " -"``FitIns``)." +"**Nouvel exemple de code pour les Transformers à visage embrassant** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:1145 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " -"créée ``FedCustom`` lors du démarrage de l'expérience :" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "Récapitulation" +"Un nouvel exemple de code (`quickstart_huggingface`) démontre " +"l'utilisation des transformateurs Hugging Face avec Flower." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/ref-changelog.md:1147 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -"Dans ce carnet, nous avons vu comment mettre en place une stratégie " -"personnalisée. Une stratégie personnalisée permet un contrôle granulaire " -"sur la configuration des nœuds clients, l'agrégation des résultats, et " -"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " -"d'écraser les méthodes abstraites de la classe de base (abstraite) " -"``Strategy``. Pour rendre les stratégies personnalisées encore plus " -"puissantes, tu peux passer des fonctions personnalisées au constructeur " -"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " -"chaque fois que c'est nécessaire." 
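The strings above describe overriding ``configure_fit`` so that a fraction of the sampled clients receives a higher learning rate while ``FedAvg``'s client sampling is kept unchanged. A minimal sketch of such a ``FedCustom`` strategy follows; it assumes a Flower 1.x-style ``configure_fit`` signature (older releases use ``rnd`` instead of ``server_round``), and the 50/50 split and learning-rate values are purely illustrative rather than the notebook's exact code.

```python
from flwr.common import FitIns, Parameters
from flwr.server.client_manager import ClientManager
from flwr.server.strategy import FedAvg


class FedCustom(FedAvg):
    """FedAvg variant that sends a higher learning rate to half of the sampled clients."""

    def configure_fit(
        self, server_round: int, parameters: Parameters, client_manager: ClientManager
    ):
        # Reuse FedAvg's sampling, then rewrite each client's config dictionary
        fit_configurations = super().configure_fit(server_round, parameters, client_manager)
        half = len(fit_configurations) // 2
        customized = []
        for idx, (client, fit_ins) in enumerate(fit_configurations):
            lr = 0.01 if idx < half else 0.05  # illustrative hyperparameter values
            config = {**fit_ins.config, "lr": lr}
            customized.append((client, FitIns(fit_ins.parameters, config)))
        return customized
```

As the surrounding text notes, the remaining step is to pass ``strategy=FedCustom(...)`` when the experiment is started.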
+"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:1149 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " -"Slack : `Join Slack `__" +"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" +" MLCube avec Flower." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:1151 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -"Il existe un canal dédié aux ``questions`` si vous avez besoin d'aide, " -"mais nous aimerions aussi savoir qui vous êtes dans ``#introductions`` !" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 -#, fuzzy +#: ../../source/ref-changelog.md:1153 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 " -"`__ présente ``Client``, l'API flexible qui sous-tend " -"``NumPyClient``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -#, fuzzy -msgid "Customize the client" -msgstr "Création du client IMDBC" +"SSL permet d'établir des connexions cryptées et sécurisées entre les " +"clients et les serveurs. Cette version met en open-source " +"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " +"communication cryptés accessibles à tous les utilisateurs de Flower." 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:1155 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" -" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__), " -"nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et les clients " -"(`partie 2 `__), et nous avons construit notre propre stratégie " -"personnalisée à partir de zéro (`partie 3 - WIP " -"`__)." +"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:1157 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -"Dans ce carnet, nous revisitons `NumPyClient`` et introduisons une " -"nouvelle classe de base pour construire des clients, simplement appelée " -"`Client``. Dans les parties précédentes de ce tutoriel, nous avons basé " -"notre client sur ``NumPyClient``, une classe de commodité qui facilite le" -" travail avec les bibliothèques d'apprentissage automatique qui ont une " -"bonne interopérabilité NumPy. Avec ``Client``, nous gagnons beaucoup de " -"flexibilité que nous n'avions pas auparavant, mais nous devrons également" -" faire quelques choses que nous n'avions pas à faire auparavant." +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:1159 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -"Allons plus loin et voyons ce qu'il faut faire pour passer de " -"``NumPyClient`` à ``Client`` !" 
- -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "Étape 0 : Préparation" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:1161 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation) et enveloppons le tout dans " -"leur propre ``DataLoader``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "Étape 1 : Revoir NumPyClient" +"`start_simulation` peut maintenant être appelé avec une liste " +"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " +"identifiants seront passés à `client_fn` chaque fois qu'un client doit " +"être initialisé, ce qui peut faciliter le chargement de partitions de " +"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:1165 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -"Jusqu'à présent, nous avons implémenté notre client en sous-classant " -"``flwr.client.NumPyClient``. Les trois méthodes que nous avons " -"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``. Enfin, " -"nous enveloppons la création d'instances de cette classe dans une " -"fonction appelée ``client_fn`` :" +"Mettre à jour le calcul de `num_examples` dans les exemples de code " +"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:1166 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à " -"présent. La seule *petite* différence par rapport au carnet précédent est" -" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` " -"et ``client_fn`` en ``numpyclient_fn``. 
Exécutons-le pour voir la sortie " -"que nous obtenons :" +"Exposer la version de Flower à travers `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:1167 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -"Cela fonctionne comme prévu, deux clients s'entraînent pour trois tours " -"d'apprentissage fédéré." +"`start_server` dans `app.py` renvoie maintenant un objet `History` " +"contenant les métriques de l'entraînement " +"([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1168 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -"Plongeons un peu plus profondément et discutons de la façon dont Flower " -"exécute cette simulation. Chaque fois qu'un client est sélectionné pour " -"effectuer un travail, ``start_simulation`` appelle la fonction " -"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" -" (en même temps qu'il charge le modèle et les données)." +"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:1169 msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -"Mais voici la partie la plus surprenante : Flower n'utilise pas " -"directement l'objet `FlowerNumPyClient`. Au lieu de cela, il enveloppe " -"l'objet pour le faire ressembler à une sous-classe de " -"`flwr.client.Client`, et non de `flwr.client.NumPyClient`. En fait, le " -"noyau de Flower ne sait pas comment gérer les `NumPyClient`, il sait " -"seulement comment gérer les `Client`. `NumPyClient` est juste une " -"abstraction de commodité construite au dessus de `Client`." +"Augmente le temps de sommeil après le démarrage du serveur à trois " +"secondes dans tous les exemples de code " +"([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/ref-changelog.md:1170 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." 
+"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -"Au lieu de construire par-dessus `NumPyClient``, nous pouvons construire " -"directement par-dessus `Client``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``" +"Ajout d'une nouvelle section FAQ à la documentation " +"([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:1171 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -"Essayons de faire la même chose en utilisant ``Client`` au lieu de " -"``NumPyClient``." +"Et bien d'autres changements sous le capot, des mises à jour de la " +"bibliothèque, des modifications de la documentation et des améliorations " +"de l'outillage !" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/ref-changelog.md:1175 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous " -"devons nous assurer que notre nouveau client basé sur le ``Client`` " -"fonctionne, n'est-ce pas ?" +"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " +"release build** ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:1177 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement " -"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la " -"différence ?" +"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " +"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " +"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " +"supprimés dans une prochaine version." + +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1183 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. 
Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"**Moteur expérimental de client virtuel** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/ref-changelog.md:1185 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -"La seule *vraie* différence entre Client et NumPyClient est que " -"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " -"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " -"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " -"Cela permet de travailler avec des bibliothèques d'apprentissage " -"automatique qui ont une bonne prise en charge de NumPy (la plupart " -"d'entre elles) en un clin d'œil." +"L'un des objectifs de Flower est de permettre la recherche à grande " +"échelle. Cette version donne un premier aperçu (expérimental) d'une " +"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " +"client virtuel. 
Les clients virtuels permettent des simulations qui " +"s'étendent à un (très) grand nombre de clients sur une seule machine ou " +"une grappe de calcul. La façon la plus simple de tester la nouvelle " +"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" +" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:1187 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "Étape 3 : Sérialisation personnalisée" +"La fonction est encore expérimentale, il n'y a donc aucune garantie de " +"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " +"prime time et s'accompagne de quelques mises en garde connues. Cependant," +" les personnes curieuses sont encouragées à l'essayer et à faire part de " +"leurs réflexions." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:1189 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -"Nous allons ici explorer comment mettre en œuvre une sérialisation " -"personnalisée à l'aide d'un exemple simple." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:1191 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. 
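The string above points out that every ``Client`` method takes exactly one ``*Ins`` object and returns exactly one ``*Res`` object, and that parameter (de)serialization becomes the user's responsibility. A fragmentary sketch, assuming the Flower 1.x ``flwr.common`` helpers ``parameters_to_ndarrays``/``ndarrays_to_parameters`` and showing only ``fit`` (a usable ``Client`` subclass would also implement ``get_parameters`` and ``evaluate``; the training step is left as a placeholder):

```python
from flwr.client import Client
from flwr.common import (
    Code,
    FitIns,
    FitRes,
    Status,
    ndarrays_to_parameters,
    parameters_to_ndarrays,
)


class RawFlowerClient(Client):
    """Illustrative fragment: Client.fit receives one FitIns and returns one FitRes."""

    def fit(self, ins: FitIns) -> FitRes:
        # Deserialize: wire-format Parameters -> list of NumPy ndarrays
        ndarrays = parameters_to_ndarrays(ins.parameters)

        # ... local training on `ndarrays` would happen here (omitted) ...

        # Serialize the (updated) ndarrays back into Parameters for the server
        updated = ndarrays_to_parameters(ndarrays)
        return FitRes(
            status=Status(code=Code.OK, message="Success"),
            parameters=updated,
            num_examples=1,  # placeholder; normally the size of the local training set
            metrics={},
        )
```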
Indeed, without " -"serialization, you could not just a Python object through the internet." +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " -"simplement le processus de conversion d'un objet en octets bruts, et tout" -" aussi important, la désérialisation est le processus de reconversion des" -" octets bruts en objet. Ceci est très utile pour la communication réseau." -" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " -"objet Python par Internet." +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:1192 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"L'apprentissage fédéré s'appuie fortement sur la communication Internet " -"pour la formation en envoyant des objets Python dans les deux sens entre " -"les clients et le serveur, ce qui signifie que la sérialisation est un " -"élément essentiel de l'apprentissage fédéré." +"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. " +"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:1194 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -"Dans la section suivante, nous allons écrire un exemple de base où, au " -"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " -"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " -"éparses, avant de les envoyer. Cette technique peut être utilisée pour " -"économiser de la bande passante, car dans certains cas où les poids d'un " -"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " -"une matrice éparse peut grandement améliorer leur taille en octets." 
+"**Nouvel exemple de code PyTorch Lightning** " +"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "" +"**Nouvel exemple de code d'autocodage variationnel** " +"([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/ref-changelog.md:1198 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -"C'est là que la véritable sérialisation/désérialisation se produira, en " -"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " -"``sparse_bytes_to_ndarray`` pour la désérialisation." +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 +#: ../../source/ref-changelog.md:1200 msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " -"convertir nos tableaux." +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "Côté client" +#: ../../source/ref-changelog.md:1204 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "" +"Amélioration de l'exemple de code TensorFlow avancé " +"([#769](https://github.com/adap/flower/pull/769))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1205 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " -"suffira d'appeler nos fonctions personnalisées dans notre " -"``flwr.client.Client``." +"Avertissement lorsque `min_available_clients` est mal configuré " +"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:1206 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " -"que nous avons obtenus de notre réseau en utilisant nos " -"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." 
+"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:1207 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " -"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " -"personnalisé, puis nous devons sérialiser nos résultats locaux avec " -"``ndarrays_to_sparse_parameters``." +"Amélioration du message d'erreur dans `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:1208 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " -"paramètres globaux avec notre fonction personnalisée." +"Exemple de code de démarrage rapide PyTorch amélioré " +"([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "Côté serveur" +#: ../../source/ref-changelog.md:1212 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "" +"**Désactivé l'évaluation finale distribuée** " +"([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:1214 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." -" Pour modifier la sérialisation et la désérialisation ici, il suffit de " -"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " -"``FedAvg``. Les autres fonctions de la stratégie seront héritées de la " -"super-classe ``FedAvg``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :" +"Le comportement précédent consistait à effectuer un dernier tour " +"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " +"souvent pas nécessaire (par exemple, lors de l'utilisation de " +"l'évaluation côté serveur). Le comportement précédent peut être activé en" +" passant `force_final_distributed_eval=True` à `start_server`." 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:1216 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque " -"résultat que nous avons reçu :" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "Puis sérialise le résultat agrégé :" +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" +#: ../../source/ref-changelog.md:1218 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -"Nous pouvons maintenant exécuter notre exemple de sérialisation " -"personnalisée !" +"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " +"refléter la notation donnée dans l'article original (q-FFL est l'objectif" +" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " +"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " +"des raisons de compatibilité (elle sera supprimée dans une prochaine " +"version)." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:1220 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -"Dans cette partie du tutoriel, nous avons vu comment construire des " -"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " -"``NumPyClient`` est une abstraction de commodité qui facilite le travail " -"avec les bibliothèques d'apprentissage automatique qui ont une bonne " -"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " -"nous permet de faire des choses qui ne sont pas possibles dans " -"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " -"sérialisation et la désérialisation des paramètres." +"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +#: ../../source/ref-changelog.md:1222 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. 
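On the server side, the strings above describe reimplementing ``evaluate`` and ``aggregate_fit`` of ``FedAvg`` so that every client result is first deserialized with the custom helpers and the aggregate is serialized again before being returned. A sketch of the ``aggregate_fit`` part only; it assumes the byte-level helpers from the previous sketch live in a hypothetical ``serialization_helpers`` module and that the internal ``flwr.server.strategy.aggregate.aggregate`` utility is available, neither of which is taken from this catalogue.

```python
from typing import Dict, List, Optional, Tuple, Union

from flwr.common import FitRes, Parameters, Scalar
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy import FedAvg
from flwr.server.strategy.aggregate import aggregate

# Hypothetical local module holding the byte-level helpers sketched earlier
from serialization_helpers import ndarray_to_sparse_bytes, sparse_bytes_to_ndarray


def ndarrays_to_sparse_parameters(ndarrays) -> Parameters:
    """Pack a list of ndarrays into the custom sparse wire format."""
    return Parameters(
        tensors=[ndarray_to_sparse_bytes(a) for a in ndarrays],
        tensor_type="numpy.ndarray.sparse",
    )


def sparse_parameters_to_ndarrays(parameters: Parameters):
    """Unpack the custom sparse wire format back into ndarrays."""
    return [sparse_bytes_to_ndarray(t) for t in parameters.tensors]


class SparseFedAvg(FedAvg):
    """FedAvg that understands the custom sparse wire format (sketch)."""

    def aggregate_fit(
        self,
        server_round: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        if not results:
            return None, {}
        # Deserialize every received result with the custom helper ...
        weights_results = [
            (sparse_parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples)
            for _, fit_res in results
        ]
        # ... aggregate, then serialize the aggregate back into sparse bytes
        return ndarrays_to_sparse_parameters(aggregate(weights_results)), {}
```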
There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -"C'est la dernière partie du tutoriel Flower (pour l'instant !), " -"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " -"la documentation. Il y a de nombreux sujets que nous n'avons pas abordés " -"dans le tutoriel, nous te recommandons les ressources suivantes :" +"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " +"basé sur le moteur expérimental du client virtuel, qui deviendra la " +"nouvelle méthode par défaut pour effectuer la plupart des types de " +"simulations à grande échelle dans Flower. L'exemple existant a été " +"conservé à des fins de référence, mais il pourrait être supprimé à " +"l'avenir." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "`Lire les docs sur les fleurs `__" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:1228 msgid "" -"`Check out Flower Code Examples " -"`__" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -"`Check out Flower Code Examples " -"`__" +"**Nouvelles stratégies intégrées** " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 -#, fuzzy +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" +msgstr "(résumé) FedOpt" + +#: ../../source/ref-changelog.md:1233 msgid "" -"`Use Flower Baselines for your research " -"`__" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -"`Utilise les lignes de base des fleurs pour ta recherche " -"`__" +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -#, fuzzy +#: ../../source/ref-changelog.md:1235 msgid "" -"`Watch Flower Summit 2023 videos `__" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -"`Regardez les vidéos du Flower Summit 2022 `__" +"Le serveur Flower est maintenant totalement agnostique, toutes les " +"instances restantes de métriques spécifiques à une tâche (telles que " +"`accuracy`) ont été remplacées par des dictionnaires de métriques " +"personnalisées. Flower 0.15 a introduit la possibilité de passer un " +"dictionnaire contenant des métriques personnalisées du client au serveur." +" À partir de cette version, les métriques personnalisées remplacent les " +"métriques spécifiques à une tâche sur le serveur." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/ref-changelog.md:1237 +#, fuzzy +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" +"Les dictionnaires de métriques personnalisés sont maintenant utilisés " +"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " +"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " +"permettent aux fonctions d'évaluation passées aux stratégies intégrées " +"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " +"stratégies peuvent même renvoyer des dictionnaires de métriques " +"*agrégées* pour que le serveur puisse en garder la trace." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1239 #, fuzzy msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -"Dans ce carnet, nous allons construire un système d'apprentissage fédéré " -"en utilisant Flower et PyTorch. Dans la première partie, nous utilisons " -"PyTorch pour le pipeline d'entraînement des modèles et le chargement des " -"données. Dans la deuxième partie, nous continuons à fédérer le pipeline " -"basé sur PyTorch en utilisant Flower." +"Les implémentations de Stratey doivent migrer leurs méthodes " +"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " +"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " +"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " +"`return loss, {\"accuracy\" : accuracy}`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -#, fuzzy -msgid "Let's get started!" -msgstr "Allons-y, déclarons-le !" +#: ../../source/ref-changelog.md:1241 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." +msgstr "" +"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " +"pris en charge), la compatibilité sera supprimée dans une prochaine " +"version." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1243 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." 
+"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -"Avant de commencer à coder, assurons-nous que nous disposons de tout ce " -"dont nous avons besoin." +"**Avertissements de migration pour les fonctionnalités obsolètes** " +"([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -#, fuzzy +#: ../../source/ref-changelog.md:1245 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` " -"et ``torchvision``) et Flower (``flwr``) :" +"Les versions antérieures de Flower ont souvent été migrées vers de " +"nouvelles API, tout en maintenant la compatibilité avec les anciennes " +"API. Cette version introduit des messages d'avertissement détaillés si " +"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " +"d'avertissement fournissent souvent des détails sur la façon de migrer " +"vers des API plus récentes, facilitant ainsi la transition d'une version " +"à l'autre." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -#, fuzzy +#: ../../source/ref-changelog.md:1247 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." 
+"Amélioration des docs et des docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "Chargement des données" +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "Exemple et documentation MXNet" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -#, fuzzy +#: ../../source/ref-changelog.md:1251 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches " -"dans différents domaines. Dans ce tutoriel, nous présentons " -"l'apprentissage fédéré en formant un simple réseau neuronal " -"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. " -"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui " -"font la distinction entre les images de dix classes différentes :" +"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" +" fédération ([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/ref-changelog.md:1255 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de" -" plusieurs organisations (également appelé le paramètre \"cross-silo\" " -"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 " -"original en plusieurs partitions. Chaque partition représentera les " -"données d'une seule organisation. Nous faisons cela purement à des fins " -"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les" -" données parce que chaque organisation a déjà ses propres données (les " -"données sont donc naturellement partitionnées)." 
+"**Serveur agnostique de sérialisation** " +"([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -#, fuzzy +#: ../../source/ref-changelog.md:1257 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -"Chaque organisation agira comme un client dans le système d'apprentissage" -" fédéré. Ainsi, le fait que dix organisations participent à une " -"fédération signifie que dix clients sont connectés au serveur " -"d'apprentissage fédéré :" +"Le serveur Flower est désormais totalement agnostique en matière de " +"sérialisation. L'utilisation antérieure de la classe `Weights` (qui " +"représente les paramètres sous forme de tableaux NumPy désérialisés) a " +"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " +"Les objets `Parameters` sont totalement agnostiques en matière de " +"sérialisation et représentent les paramètres sous forme de tableaux " +"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " +"d'octets doivent être interprétés (par exemple, pour la " +"sérialisation/désérialisation)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 +#: ../../source/ref-changelog.md:1259 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" +"Les stratégies intégrées mettent en œuvre cette approche en gérant en " +"interne la sérialisation et la désérialisation de `Weights`. Les " +"implémentations de stratégies personnalisées ou tierces doivent être " +"mises à jour avec les définitions de méthodes de stratégie légèrement " +"modifiées. Les auteurs de stratégies peuvent consulter le PR " +"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " +"stratégies peuvent facilement migrer vers le nouveau format." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -#, fuzzy +#: ../../source/ref-changelog.md:1261 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). 
Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -"Nous avons maintenant une liste de dix ensembles de formation et dix " -"ensembles de validation (``trainloaders`` et ``valloaders``) représentant" -" les données de dix organisations différentes. Chaque paire " -"``trainloader`/``valloader`` contient 4500 exemples de formation et 500 " -"exemples de validation. Il y a également un seul ``testloader`` (nous " -"n'avons pas divisé l'ensemble de test). Encore une fois, cela n'est " -"nécessaire que pour construire des systèmes de recherche ou d'éducation, " -"les systèmes d'apprentissage fédérés actuels ont leurs données " -"naturellement distribuées à travers plusieurs partitions." +"Déclassé `flwr.server.Server.evaluate`, utiliser " +"`flwr.server.Server.evaluate_round` à la place " +"([#717](https://github.com/adap/flower/pull/717))" + +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/ref-changelog.md:1267 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier " -"ensemble d'entraînement (c'est-à-dire ``trainloaders[0]``) avant de " -"poursuivre :" +"**Initialisation des paramètres côté serveur** " +"([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +#: ../../source/ref-changelog.md:1269 msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -"La sortie ci-dessus montre un lot aléatoire d'images provenant du premier" -" ``chargeur de formation`` de notre liste de dix ``chargeurs de " -"formation``. Elle imprime également les étiquettes associées à chaque " -"image (c'est-à-dire l'une des dix étiquettes possibles que nous avons " -"vues ci-dessus). Si tu exécutes à nouveau la cellule, tu devrais voir un " -"autre lot d'images." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "Étape 1 : Formation centralisée avec PyTorch" +"Les paramètres du modèle peuvent maintenant être initialisés côté " +"serveur. L'initialisation des paramètres côté serveur fonctionne via une " +"nouvelle méthode `Strategy` appelée `initialize_parameters`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:1271 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. 
This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " -"neuronal convolutif. Cette introduction suppose une familiarité de base " -"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " -"PyTorch. Si tu veux plonger plus profondément dans PyTorch, nous te " -"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " -"`__." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "Définir le modèle" +"Les stratégies intégrées prennent en charge un nouvel argument du " +"constructeur appelé `initial_parameters` pour définir les paramètres " +"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " +"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:1290 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " -"`__ :" +"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " +"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " +"l'un des clients connectés ses paramètres et les utilisera comme " +"paramètres globaux initiaux)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" +#: ../../source/ref-changelog.md:1294 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "Entraîne le modèle" +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:1300 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). 
This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -"Nous avons maintenant tous les éléments de base dont nous avons besoin : " -"un ensemble de données, un modèle, une fonction d'entraînement et une " -"fonction de test. Assemblons-les pour entraîner le modèle sur l'ensemble " -"de données de l'une de nos organisations (``trainloaders[0]``). Cela " -"simule la réalité de la plupart des projets d'apprentissage automatique " -"aujourd'hui : chaque organisation possède ses propres données et entraîne" -" les modèles uniquement sur ces données internes :" +"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " +"retour** ([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/ref-changelog.md:1302 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " -"époques devrait se traduire par une précision de l'ensemble de test " -"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " -"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " -"juste de montrer un pipeline d'entraînement centralisé simpliste qui " -"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "Étape 2 : Apprentissage fédéré avec Flower" +"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " +"associant les clés `str` aux valeurs des types suivants : `bool`, " +"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " +"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " +"du serveur !" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/ref-changelog.md:1304 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." 
+"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" -" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" -" un seul ``valloader``). Ensuite, nous allons simuler une situation où " -"nous avons plusieurs ensembles de données dans plusieurs organisations et" -" où nous formons un modèle sur ces organisations à l'aide de " -"l'apprentissage fédéré." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "Mise à jour des paramètres du modèle" +"Cette amélioration a également permis de rendre plus cohérents les types " +"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " +"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " +"d'exemples, et un dictionnaire contenant des valeurs arbitraires " +"spécifiques au problème comme la précision." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:1306 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " -"global au client, et le client met à jour le modèle local avec les " -"paramètres reçus du serveur. Il entraîne ensuite le modèle sur les " -"données locales (ce qui modifie les paramètres du modèle localement) et " -"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " -"alternativement, il renvoie seulement les gradients au serveur, et non " -"pas les paramètres complets du modèle)." +"Au cas où tu te poserais la question : cette fonctionnalité est " +"compatible avec les projets existants, la valeur de retour supplémentaire" +" du dictionnaire est facultative. Le nouveau code doit cependant migrer " +"vers les nouveaux types de retour pour être compatible avec les " +"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " +"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " +"ci-dessous pour plus de détails." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:1308 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." 
+"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " -"local avec les paramètres reçus du serveur et pour obtenir les paramètres" -" mis à jour du modèle local : ``set_parameters`` et ``get_parameters``. " -"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " -"ci-dessus." +"*Exemple de code:* note les valeurs de retour du dictionnaire " +"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:1323 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -"Les détails de ce fonctionnement ne sont pas vraiment importants ici " -"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " -"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " -"tenseurs de paramètres du modèle PyTorch. Les tenseurs de paramètres sont" -" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " -"sérialiser/désérialiser) :" +"**Généralisé** `config` **argument dans** `Client.fit` **et** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "Mise en place d'un client Flower" +#: ../../source/ref-changelog.md:1325 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" +"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " +"signifie que les valeurs du dictionnaire devaient être des chaînes. La " +"nouvelle version généralise cela pour permettre les valeurs des types " +"suivants : `bool`, `bytes`, `float`, `int`, `str`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:1327 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -"Ceci étant dit, passons à la partie intéressante. Les systèmes " -"d'apprentissage fédérés se composent d'un serveur et de plusieurs " -"clients. Dans Flower, nous créons des clients en mettant en œuvre des " -"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." 
-" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " -"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " -"de chaudière." +"Cela signifie que l'on peut maintenant passer des valeurs presque " +"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " +"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " +"du côté client !" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:1329 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -"Pour mettre en œuvre le client Flower, nous créons une sous-classe de " -"``flwr.client.NumPyClient`` et mettons en œuvre les trois méthodes " -"``get_parameters``, ``fit`` et ``evaluate`` :" +"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" +" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-changelog.md:1350 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -"``fit`` : reçoit les paramètres du modèle du serveur, entraîne les " -"paramètres du modèle sur les données locales et renvoie les paramètres du" -" modèle (mis à jour) au serveur" +"Nouvel exemple : PyTorch de centralisé à fédéré " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 -msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" +msgstr "Amélioration de la documentation" + +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " -"paramètres du modèle sur les données locales et renvoie le résultat de " -"l'évaluation au serveur" +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:1354 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. 
Let's see a simple Flower " -"client implementation that brings everything together:" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -"Nous avons mentionné que nos clients utiliseront les composants PyTorch " -"définis précédemment pour la formation et l'évaluation des modèles. " -"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" +"Mise à jour de la documentation des exemples " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-changelog.md:1355 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"Suppression de la documentation obsolète " +"([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "Utilisation du moteur du client virtuel" +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" +msgstr "Correction de bogues :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +#: ../../source/ref-changelog.md:1359 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
msgstr "" -"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " -"avec 10 clients sur une seule machine. Cela signifie que le serveur et " -"les 10 clients vivront sur une seule machine et partageront des " -"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " -"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. Faire " -"cela sur une seule machine peut rapidement épuiser les ressources mémoire" -" disponibles, même si seulement un sous-ensemble de ces clients participe" -" à un seul tour d'apprentissage fédéré." +"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " +"déconnexion des clients est maintenant gérée dans " +"`flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 -msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" -msgstr "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "Commencer la formation" +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" +msgstr "Changements importants :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-changelog.md:1365 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." 
+"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -"Nous avons maintenant la classe ``FlowerClient`` qui définit " -"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " -"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " -"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. La " -"dernière étape consiste à démarrer la simulation réelle en utilisant " -"``flwr.simulation.start_simulation``." +"Ajout d'un exemple pour les périphériques embarqués " +"([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-changelog.md:1366 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -"La fonction ``start_simulation`` accepte un certain nombre d'arguments, " -"parmi lesquels le ``client_fn`` utilisé pour créer les instances " -"``FlowerClient``, le nombre de clients à simuler (``num_clients``), le " -"nombre de tours d'apprentissage fédéré (``num_rounds``), et la stratégie." -" La stratégie encapsule l'approche/algorithme d'apprentissage fédéré, par" -" exemple, *Federated Averaging* (FedAvg)." +"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/ref-changelog.md:1367 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " -"pouvons également utiliser nos propres implémentations de stratégies pour" -" personnaliser presque tous les aspects de l'approche de l'apprentissage " -"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " -"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " -"base. 
La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " -"deviné - démarre la simulation :" +"Déclassement du paquet `flwr_example` et migration des exemples dans le " +"répertoire de premier niveau `examples` " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "Dans les coulisses" +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "" -"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " -"simulation ?" +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" +msgstr "Changements incompatibles :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-changelog.md:1373 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " -"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " -"``FedAvg`` de sélectionner des clients. ``FedAvg` sait qu'il doit " -"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors " -"il choisit 10 clients au hasard (c'est à dire 100% de 10)." +"Renommé les méthodes de stratégie " +"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" +" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " +"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" +" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " +"quatre méthodes de Stratégie. Pour migrer, renommez les méthodes de " +"`Strategy` suivantes en conséquence :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 -msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." -msgstr "" -"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle." -" Lorsque le serveur reçoit les mises à jour des paramètres du modèle de " -"la part des clients, il les transmet à la stratégie (*FedAvg*) pour " -"qu'elle les agrège. La stratégie agrège ces mises à jour et renvoie le " -"nouveau modèle global, qui est ensuite utilisé dans le prochain cycle " -"d'apprentissage fédéré." 
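The entry above (ref-changelog.md:1373) describes dropping the `on_` prefix from the `Strategy` hooks; the individual renames are listed in the entries that follow. As a minimal sketch, assuming a recent `flwr` release rather than the 0.11-era signatures, a custom strategy built on `FedAvg` would override the renamed hook like this (the class name is hypothetical):

```python
# Minimal sketch: a custom strategy overriding the renamed hook
# `configure_fit` (formerly `on_configure_fit`). The override simply
# delegates to FedAvg's default client sampling and configuration.
from flwr.server.strategy import FedAvg


class MyFedAvg(FedAvg):
    def configure_fit(self, server_round, parameters, client_manager):
        # Same behaviour as FedAvg, exposed under the un-prefixed method name.
        return super().configure_fit(server_round, parameters, client_manager)
```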
+#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "Où est la précision ?" +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" + +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" + +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-changelog.md:1381 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -"Tu as peut-être remarqué que toutes les mesures, à l'exception de " -"``pertes_distribuées``, sont vides. Où est passée la ``{\"précision\" : " -"float(précision)}`` ?" +"Déclassé `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " +"`FedAvg` à la place." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-changelog.md:1382 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -"Flower peut automatiquement agréger les pertes renvoyées par les clients " -"individuels, mais il ne peut pas faire la même chose pour les mesures " -"dans le dictionnaire de mesures générique (celui avec la clé " -"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de" -" mesures très différents et même des paires clé/valeur qui ne sont pas " -"des mesures du tout, donc le cadre ne sait pas (et ne peut pas) savoir " -"comment les gérer automatiquement." +"Exemples simplifiés et lignes de base " +"([#484](https://github.com/adap/flower/pull/484))." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/ref-changelog.md:1383 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -"En tant qu'utilisateurs, nous devons indiquer au framework comment " -"gérer/agréger ces métriques personnalisées, et nous le faisons en passant" -" des fonctions d'agrégation de métriques à la stratégie. 
La stratégie " -"appellera alors ces fonctions chaque fois qu'elle recevra des métriques " -"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions " -"possibles sont ``fit_metrics_aggregation_fn`` et " -"``evaluate_metrics_aggregation_fn``." +"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " +"de stratégie ([#483](https://github.com/adap/flower/pull/483))." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/ref-changelog.md:1384 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -"Créons une simple fonction de calcul de la moyenne pondérée pour agréger " -"la mesure de \"précision\" que nous renvoie ``evaluate`` :" +"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/ref-changelog.md:1385 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -"La seule chose qui reste à faire est d'indiquer à la stratégie d'appeler " -"cette fonction chaque fois qu'elle reçoit des dictionnaires de métriques " -"d'évaluation de la part des clients :" +"Amélioration des docstrings `Stratégie` " +"([#470](https://github.com/adap/flower/pull/470))." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 -msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." -msgstr "" -"Nous avons maintenant un système complet qui effectue la formation " -"fédérée et l'évaluation fédérée. Il utilise la fonction ``moyenne " -"pondérée`` pour agréger les mesures d'évaluation personnalisées et " -"calcule une seule mesure de ``précision`` pour tous les clients du côté " -"du serveur." +#: ../../source/ref-example-projects.rst:2 +#, fuzzy +msgid "Example projects" +msgstr "Exemples de PyTorch" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-example-projects.rst:4 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -"Les deux autres catégories de mesures (``pertes_centralisées`` et " -"``métriques_centralisées``) sont toujours vides car elles ne s'appliquent" -" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du" -" tutoriel sur les fleurs couvrira l'évaluation centralisée." 
+"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " +"montrent comment Flower peut être utilisé pour fédérer différents types " +"de pipelines d'apprentissage automatique existants, qui s'appuient " +"généralement sur des frameworks d'apprentissage automatique populaires " +"tels que `PyTorch `_ ou `TensorFlow " +"`_." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "Remarques finales" +#: ../../source/ref-example-projects.rst:10 +#, fuzzy +msgid "" +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" +msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-example-projects.rst:14 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré" -" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage " -"fédéré avec Flower. La même approche que tu as vue peut être utilisée " -"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) " -"et d'autres tâches (pas seulement la classification des images CIFAR-10)," -" par exemple le NLP avec Hugging Face Transformers ou la parole avec " -"SpeechBrain." +"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " +"d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/ref-example-projects.rst:17 +#, fuzzy msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -"Dans le prochain cahier, nous allons aborder des concepts plus avancés. " -"Tu veux personnaliser ta stratégie ? Initialiser des paramètres côté " -"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout" -" cela et bien plus encore dans le prochain tutoriel." +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/ref-example-projects.rst:18 #, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" +"`Quickstart TensorFlow (Tutorial) `_" + +#: ../../source/ref-example-projects.rst:19 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 " -"`__ va plus en profondeur sur les stratégies et toutes les " -"choses avancées que tu peux construire avec elles." 
+"`Quickstart TensorFlow (Blog Post) `_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -#, fuzzy -msgid "Use a federated learning strategy" -msgstr "Stratégie de moyenne fédérée." +#: ../../source/ref-example-projects.rst:23 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" +msgstr "Démarrage rapide de PyTorch" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-example-projects.rst:25 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -"Bienvenue dans la prochaine partie du tutoriel sur l'apprentissage " -"fédéré. Dans les parties précédentes de ce tutoriel, nous avons présenté " -"l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__)." +"L'exemple de démarrage rapide PyTorch montre la classification d'images " +"CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/ref-example-projects.rst:28 +#, fuzzy msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -"Dans ce carnet, nous allons commencer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit dans le carnet " -"d'introduction (toujours en utilisant `Flower `__ et " -"`PyTorch `__)." +"`Quickstart PyTorch (Code) " +"`_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 +#: ../../source/ref-example-projects.rst:29 #, fuzzy -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "Dépassons FedAvg avec les stratégies florales !" +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" +"`Quickstart PyTorch (Tutorial) `_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "Personnalisation de la stratégie" +#: ../../source/ref-example-projects.rst:33 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch : De la centralisation à la fédération" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-example-projects.rst:35 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -"Jusqu'à présent, tout devrait te sembler familier si tu as travaillé sur " -"le cahier d'introduction. Avec cela, nous sommes prêts à présenter un " -"certain nombre de nouvelles fonctionnalités." 
- -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "Paramètres côté serveur **initialisation**" +"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" +" l'aide de Flower :" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/ref-example-projects.rst:37 +#, fuzzy msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -"Flower, par défaut, initialise le modèle global en demandant à un client " -"aléatoire les paramètres initiaux. Dans de nombreux cas, nous voulons " -"cependant avoir plus de contrôle sur l'initialisation des paramètres. " -"Flower te permet donc de passer directement les paramètres initiaux à la " -"Stratégie :" +"`PyTorch : De la centralisation à la fédération (Code) " +"`_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/ref-example-projects.rst:38 +#, fuzzy msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " -"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" -" nous regardons de près, nous pouvons voir que les journaux ne montrent " -"aucun appel à la méthode ``FlowerClient.get_parameters``." +"`PyTorch : De la centralisation à la fédération (Tutoriel) " +"`_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "Commencer par une stratégie personnalisée" +#: ../../source/ref-example-projects.rst:42 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/ref-example-projects.rst:44 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -"Elle accepte un certain nombre d'arguments, parmi lesquels le " -"``client_fn`` utilisé pour créer les instances de ``FlowerClient``, le " -"nombre de clients à simuler ``num_clients``, le nombre de rounds " -"``num_rounds``, et la stratégie." 
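The strings above describe server-side parameter initialization: passing ``initial_parameters`` to ``FedAvg`` so Flower does not ask a random client for the initial weights. A hedged sketch of that pattern, assuming the ``flwr`` 1.x API; the ``nn.Linear`` model is only a placeholder standing in for the tutorial's ``Net``:

    import torch.nn as nn

    from flwr.common import ndarrays_to_parameters
    from flwr.server.strategy import FedAvg


    def get_parameters(net: nn.Module):
        # Extract the model weights as a list of NumPy ndarrays.
        return [val.cpu().numpy() for _, val in net.state_dict().items()]


    net = nn.Linear(10, 2)  # placeholder model standing in for the tutorial's `Net`
    strategy = FedAvg(
        # With initial_parameters set, Flower skips calling get_parameters on a client.
        initial_parameters=ndarrays_to_parameters(get_parameters(net)),
    )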
+"Cet exemple montre comment Flower peut être utilisé pour construire un " +"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " +"Jetson :" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/ref-example-projects.rst:46 +#, fuzzy msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -"La stratégie englobe l'approche/l'algorithme d'apprentissage fédéré, par " -"exemple, ``FedAvg`` ou ``FedAdagrad``. Essayons d'utiliser une stratégie " -"différente cette fois-ci :" +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " +"`_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "Paramètre côté serveur **évaluation**" +#: ../../source/ref-example-projects.rst:47 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " +"`_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-faq.rst:4 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -"Flower peut évaluer le modèle agrégé côté serveur ou côté client. Les " -"évaluations côté client et côté serveur sont similaires à certains " -"égards, mais différentes à d'autres." +"Cette page rassemble les réponses aux questions les plus fréquemment " +"posées sur l'apprentissage fédéré avec Flower." + +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" +":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " +"Juptyter / Google Colab ?" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/ref-faq.rst:8 msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -"**L'évaluation centralisée** (ou *évaluation côté serveur*) est " -"conceptuellement simple : elle fonctionne de la même manière que " -"l'évaluation dans l'apprentissage automatique centralisé. S'il existe un " -"ensemble de données côté serveur qui peut être utilisé à des fins " -"d'évaluation, alors c'est parfait. Nous pouvons évaluer le modèle " -"nouvellement agrégé après chaque cycle de formation sans avoir à envoyer " -"le modèle aux clients. Nous avons également la chance que l'ensemble de " -"notre ensemble de données d'évaluation soit disponible à tout moment." 
+"Oui, c'est possible ! Flower est même livré avec quelques optimisations " +"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " +"démarrage rapide :" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-faq.rst:10 +#, fuzzy msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." +"`Flower simulation PyTorch " +"`_" msgstr "" -"**L'évaluation fédérée** (ou évaluation côté client) est plus complexe, " -"mais aussi plus puissante : elle ne nécessite pas d'ensemble de données " -"centralisé et nous permet d'évaluer les modèles sur un plus grand " -"ensemble de données, ce qui donne souvent des résultats d'évaluation plus" -" réalistes. En fait, de nombreux scénarios exigent que nous utilisions " -"l'évaluation fédérée** si nous voulons obtenir des résultats d'évaluation" -" représentatifs. Mais cette puissance a un coût : une fois que nous " -"commençons à évaluer côté client, nous devons savoir que notre ensemble " -"de données d'évaluation peut changer au cours des cycles d'apprentissage " -"consécutifs si ces clients ne sont pas toujours disponibles. De plus, " -"l'ensemble de données détenu par chaque client peut également changer au " -"cours des cycles consécutifs. Cela peut conduire à des résultats " -"d'évaluation qui ne sont pas stables, donc même si nous ne changions pas " -"le modèle, nous verrions nos résultats d'évaluation fluctuer au cours des" -" cycles consécutifs." +"`Flower Quickstart (TensorFlow/Keras) " +"`_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-faq.rst:11 +#, fuzzy msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -"Nous avons vu comment l'évaluation fédérée fonctionne du côté client " -"(c'est-à-dire en implémentant la méthode ``evaluate`` dans " -"``FlowerClient``). Voyons maintenant comment nous pouvons évaluer les " -"paramètres du modèle agrégé du côté serveur :" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "Envoi/réception de valeurs arbitraires vers/depuis les clients" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr "" +":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " +"sur un Raspberry Pi ?" 
-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/ref-faq.rst:15 #, fuzzy msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"Trouve le `blog post about federated learning on embedded device ici " +"`_" +" et l'exemple de code GitHub correspondant " +"`_." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 -msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -"Comment pouvons-nous donc envoyer ce dictionnaire de configuration du " -"serveur aux clients ? Les stratégies de Flower intégrées fournissent un " -"moyen de le faire, et cela fonctionne de la même façon que l'évaluation " -"côté serveur. Nous fournissons une fonction à la stratégie, et la " -"stratégie appelle cette fonction pour chaque cycle d'apprentissage fédéré" -" :" +":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " +"sur les appareils Android ?" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/ref-faq.rst:19 +#, fuzzy msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -"Ensuite, nous allons simplement passer cette fonction à la stratégie " -"FedAvg avant de commencer la simulation :" +"Oui. 
Jetez un coup d'œil à notre `blog post " +"`_ ou consultez l'`exemple de code Android sur GitHub " +"`_." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-faq.rst:21 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"`Android Kotlin example `_" msgstr "" -"Comme nous pouvons le voir, les journaux des clients incluent maintenant " -"le cycle actuel d'apprentissage fédéré (qu'ils lisent depuis le " -"dictionnaire ``config``). Nous pouvons également configurer " -"l'apprentissage local pour qu'il s'exécute pendant une époque au cours du" -" premier et du deuxième cycle d'apprentissage fédéré, puis pendant deux " -"époques au cours du troisième cycle." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" +":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" +" ?" + +#: ../../source/ref-faq.rst:26 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -"Les clients peuvent également renvoyer des valeurs arbitraires au " -"serveur. Pour ce faire, ils renvoient un dictionnaire depuis ``fit`` " -"et/ou ``evaluate``. Nous avons vu et utilisé ce concept tout au long de " -"ce carnet sans le mentionner explicitement : notre ``FlowerClient`` " -"renvoie un dictionnaire contenant une paire clé/valeur personnalisée en " -"tant que troisième valeur de retour dans ``evaluate``." +"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " +"environnement blockchain est disponible ici :" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" + +#: ../../source/ref-faq.rst:29 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:30 +#, fuzzy +msgid "Local blockchain with federated learning simulation." msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/ref-faq.rst:31 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -"Comme dernière étape de ce carnet, voyons comment nous pouvons utiliser " -"Flower pour expérimenter avec un grand nombre de clients." +"`Flower meets Nevermined GitHub Repository `_." 
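The surrounding entries quote the tutorial's explanation of sending a ``config`` dictionary (``server_round``, ``local_epochs``) from the server to clients by handing a function to the strategy, and of clients returning their own dictionaries from ``fit``/``evaluate``. A minimal sketch, assuming the ``on_fit_config_fn`` hook of ``flwr``'s ``FedAvg``:

    from typing import Dict

    from flwr.common import Scalar
    from flwr.server.strategy import FedAvg


    def fit_config(server_round: int) -> Dict[str, Scalar]:
        # Called by the strategy every round; the returned dict reaches each
        # client's fit() as its `config` argument.
        return {
            "server_round": server_round,
            # One local epoch in rounds 1-2, two epochs from round 3 onwards.
            "local_epochs": 1 if server_round < 3 else 2,
        }


    strategy = FedAvg(on_fit_config_fn=fit_config)

    # Client side (inside FlowerClient.fit), the values can be read back, e.g.:
    #   server_round = config["server_round"]
    #   local_epochs = config["local_epochs"]
    # and fit()/evaluate() may return their own dict of custom metrics to the server.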
-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-faq.rst:32 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -"Nous avons maintenant 1000 partitions, chacune contenant 45 exemples " -"d'entraînement et 5 exemples de validation. Etant donné que le nombre " -"d'exemples d'entraînement sur chaque client est assez faible, nous " -"devrions probablement entraîner le modèle un peu plus longtemps, nous " -"configurons donc les clients pour qu'ils effectuent 3 époques " -"d'entraînement local. Nous devrions également ajuster la fraction de " -"clients sélectionnés pour l'entraînement à chaque tour (nous ne voulons " -"pas que les 1000 clients participent à chaque tour), nous ajustons donc " -"``fraction_fit`` à ``0.05``, ce qui signifie que seulement 5% des clients" -" disponibles (donc 50 clients) seront sélectionnés pour l'entraînement à " -"chaque tour :" +"`Flower rencontre Nevermined vidéo YouTube " +"`_." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/ref-faq.rst:33 +#, fuzzy msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" +"`Flower meets KOSMoS `_." msgstr "" -"Dans ce carnet, nous avons vu comment nous pouvons progressivement " -"améliorer notre système en personnalisant la stratégie, en initialisant " -"les paramètres côté serveur, en choisissant une stratégie différente et " -"en évaluant les modèles côté serveur. C'est une sacrée flexibilité avec " -"si peu de code, n'est-ce pas ?" +"`Flower rencontre KOSMoS `_." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/ref-faq.rst:34 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"`Flower meets Talan blog post `_ ." msgstr "" -"Dans les sections ultérieures, nous avons vu comment nous pouvons " -"communiquer des valeurs arbitraires entre le serveur et les clients pour " -"personnaliser entièrement l'exécution côté client. Grâce à cette " -"capacité, nous avons construit une simulation d'apprentissage fédéré à " -"grande échelle en utilisant le moteur de client virtuel Flower et nous " -"avons mené une expérience impliquant 1000 clients dans la même charge de " -"travail - le tout dans un carnet Jupyter !" +"`Flower meets Talan blog post `_ ." 
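The scaling passage above (1000 simulated clients, only 5% selected per round) maps onto a few strategy arguments. A sketch, assuming current ``flwr`` ``FedAvg`` keyword names; the 3 local epochs mentioned in the text would be configured through ``on_fit_config_fn`` as in the earlier sketch:

    from flwr.server.strategy import FedAvg

    strategy = FedAvg(
        fraction_fit=0.05,       # 5% of 1000 available clients -> 50 clients train per round
        fraction_evaluate=0.05,  # likewise, evaluate on 5% of clients per round
        min_available_clients=1000,
    )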
-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -#, fuzzy +#: ../../source/ref-faq.rst:35 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " -"`__ montre comment construire une ``Stratégie`` entièrement " -"personnalisée à partir de zéro." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "Qu'est-ce que l'apprentissage fédéré ?" +"`Flower rencontre Talan Dépôt GitHub " +"`_ ." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -#, fuzzy -msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." -msgstr "" -"Dans ce tutoriel, tu apprendras ce qu'est l'apprentissage fédéré, tu " -"construiras ton premier système dans Flower, et tu l'étendras " -"progressivement. Si tu travailles sur toutes les parties du tutoriel, tu " -"seras capable de construire des systèmes d'apprentissage fédéré avancés " -"qui se rapprochent de l'état actuel de l'art dans le domaine." +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "Télémétrie" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-telemetry.md:3 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -"🧑‍🏫 Ce tutoriel part de zéro et n'attend aucune familiarité avec " -"l'apprentissage fédéré. Seule une compréhension de base de la science des" -" données et de la programmation Python est supposée." +"Le projet open-source Flower recueille des mesures d'utilisation " +"**anonymes** afin de prendre des décisions éclairées pour améliorer " +"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" +" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " +"confrontés." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 -#, fuzzy +#: ../../source/ref-telemetry.md:5 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." 
+"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** En restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des mesures d'utilisation anonymes." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "Apprentissage automatique classique" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "Principes" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 -msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -"Avant de commencer à discuter de l'apprentissage fédéré, récapitulons " -"rapidement la façon dont la plupart des apprentissages automatiques " -"fonctionnent aujourd'hui." +"Nous suivons des principes stricts concernant la collecte de données " +"anonymes sur l'utilisation :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/ref-telemetry.md:11 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -"Dans l'apprentissage automatique, nous avons un modèle et des données. Le" -" modèle peut être un réseau neuronal (comme illustré ici), ou quelque " -"chose d'autre, comme la régression linéaire classique." +"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " +"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" +"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " +"contiennent aucune information personnelle identifiable (PII). Voir " +"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " +"mesures sont rapportées." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "Modèle et données" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-telemetry.md:13 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -"Nous entraînons le modèle en utilisant les données pour effectuer une " -"tâche utile. Une tâche peut consister à détecter des objets dans des " -"images, à transcrire un enregistrement audio ou à jouer à un jeu comme le" -" Go." 
+"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " +"sont rapportées ; voir la section \"[Comment inspecter ce qui est " +"rapporté](#how-to-inspect-what-is-being-reported)\"" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" +#: ../../source/ref-telemetry.md:14 +#, fuzzy +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." msgstr "" +"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " +"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" +"to-contact-us)\" pour plus de détails." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "Entraîne le modèle à l'aide des données" +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "Comment se désinscrire" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -#, fuzzy +#: ../../source/ref-telemetry.md:18 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -"Dans la pratique, les données d'entraînement avec lesquelles nous " -"travaillons ne proviennent pas de la machine sur laquelle nous entraînons" -" le modèle. Elles sont créées ailleurs." +"Lorsque Flower démarre, il vérifie la présence d'une variable " +"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " +"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " +"supposant que tu démarres un serveur ou un client Flower, fais-le " +"simplement en faisant précéder ta commande de la façon suivante :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -#, fuzzy +#: ../../source/ref-telemetry.md:24 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -"Elle prend naissance sur un smartphone par l'interaction de l'utilisateur" -" avec une application, une voiture qui collecte des données de capteurs, " -"un ordinateur portable qui reçoit des entrées via le clavier, ou un haut-" -"parleur intelligent qui écoute quelqu'un qui essaie de chanter une " -"chanson." +"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " +"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " +"environnement) pour désactiver la télémétrie de la fleur de façon " +"permanente." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" -msgstr "" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "Mesures collectées" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "Données sur un téléphone" +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "La télémétrie des fleurs recueille les métriques suivantes :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/ref-telemetry.md:30 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -"Il est également important de mentionner que cet \"ailleurs\" n'est " -"généralement pas un seul endroit, mais plusieurs. Il peut s'agir de " -"plusieurs appareils fonctionnant tous avec la même application. Mais il " -"peut également s'agir de plusieurs organisations, qui génèrent toutes des" -" données pour la même tâche." +"**Cela nous aide à décider si nous devons investir des efforts dans la " +"publication d'une version corrective pour une version plus ancienne de " +"Flower ou si nous devons plutôt utiliser la bande passante pour " +"développer de nouvelles fonctionnalités." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" +"**Système d'exploitation.** Nous permet de répondre à des questions " +"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " +"Windows ?" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "Les données se trouvent sur de nombreux appareils" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#: ../../source/ref-telemetry.md:34 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." msgstr "" -"Ainsi, pour utiliser l'apprentissage automatique, ou tout autre type " -"d'analyse de données, l'approche utilisée par le passé consistait à " -"collecter toutes les données sur un serveur central. Ce serveur peut se " -"trouver quelque part dans un centre de données, ou quelque part dans le " -"cloud." 
+"**Version de Python.** Connaître la version de Python nous aide, par " +"exemple, à décider si nous devons investir des efforts dans la prise en " +"charge des anciennes versions de Python ou cesser de les prendre en " +"charge et commencer à tirer parti des nouvelles fonctionnalités de " +"Python." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" +"**Comprendre l'environnement matériel dans lequel Flower est utilisé " +"permet de décider si nous devrions, par exemple, faire plus d'efforts " +"pour prendre en charge les environnements à faibles ressources." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "Collecte centralisée des données" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -#, fuzzy +#: ../../source/ref-telemetry.md:38 msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" -"Une fois que toutes les données sont rassemblées en un seul endroit, nous" -" pouvons enfin utiliser des algorithmes d'apprentissage automatique pour " -"entraîner notre modèle sur les données. C'est l'approche d'apprentissage " -"automatique sur laquelle nous nous sommes fondamentalement toujours " -"appuyés." +"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " +"démarre nous permet de comprendre à quel point certaines fonctionnalités " +"sont utilisées et de mieux établir les priorités en fonction de cela." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" +"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " +"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " +"nous permet de comprendre quels types d'appareils non seulement démarrent" +" les charges de travail Flower, mais aussi les terminent avec succès." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "Formation au modèle central" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "Les défis de l'apprentissage automatique classique" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/ref-telemetry.md:42 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." 
+"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -"L'approche classique de l'apprentissage automatique que nous venons de " -"voir peut être utilisée dans certains cas. Parmi les grands exemples, on " -"peut citer la catégorisation des photos de vacances, ou l'analyse du " -"trafic web. Des cas, où toutes les données sont naturellement disponibles" -" sur un serveur centralisé." +"**Source.** La télémétrie de Flower essaie de stocker un ID de source " +"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " +"télémétrie est généré. L'ID de source est important pour identifier si un" +" problème est récurrent ou si un problème est déclenché par plusieurs " +"clusters fonctionnant simultanément (ce qui arrive souvent en " +"simulation). Par exemple, si un périphérique exécute plusieurs charges de" +" travail en même temps, et que cela entraîne un problème, alors, afin de " +"reproduire le problème, plusieurs charges de travail doivent être " +"démarrées en même temps." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" +"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " +"souhaites que tous les événements enregistrés sous un identifiant de " +"source spécifique soient supprimés, tu peux envoyer une demande de " +"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. " +"Tous les événements liés à cet identifiant de source seront alors " +"définitivement supprimés." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "Possibilité de centralisation" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/ref-telemetry.md:46 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -"Mais cette approche ne peut pas être utilisée dans de nombreux autres cas" -" : lorsque les données ne sont pas disponibles sur un serveur centralisé," -" ou lorsque les données disponibles sur un serveur ne sont pas " -"suffisantes pour former un bon modèle." +"Nous ne collecterons aucune information personnelle identifiable. 
Si tu " +"penses que l'une des métriques collectées pourrait être utilisée à " +"mauvais escient de quelque manière que ce soit, merci de [nous " +"contacter](#commentnouscontacter). Nous mettrons à jour cette page pour " +"refléter toute modification des métriques collectées et nous publierons " +"les changements dans le journal des modifications (changelog)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|9980b5213db547d0b8024a50992b9e3f|" +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" +"Si tu penses que d'autres mesures nous seraient utiles pour mieux " +"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " +"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " +"la vie privée des utilisateurs, nous pourrons les ajouter." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "Impossible de centraliser" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "Comment inspecter ce qui est rapporté" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -#, fuzzy +#: ../../source/ref-telemetry.md:52 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -"Il existe de nombreuses raisons pour lesquelles l'approche classique " -"centralisée de l'apprentissage automatique ne fonctionne pas pour un " -"grand nombre de cas d'utilisation très importants dans le monde réel, " -"notamment :" +"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " +"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " +"informations de télémétrie rapportées en définissant la variable " +"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " +"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " +"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " +"sans envoyer de mesures." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -#, fuzzy +#: ../../source/ref-telemetry.md:58 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." 
+"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -"**Réglementations** : GDPR (Europe), CCPA (Californie), PIPEDA (Canada), " -"LGPD (Brésil), PDPL (Argentine), KVKK (Turquie), POPI (Afrique du Sud), " -"FSS (Russie), CDPR (Chine), PDPB (Inde), PIPA (Corée), APPI (Japon), PDP " -"(Indonésie), PDPA (Singapour), APP (Australie), et d'autres " -"réglementations protègent les données sensibles contre le déplacement. En" -" fait, ces réglementations empêchent même parfois des organisations " -"individuelles de combiner les données de leurs propres utilisateurs pour " -"la formation à l'intelligence artificielle parce que ces utilisateurs " -"vivent dans différentes parties du monde, et que leurs données sont " -"régies par des réglementations différentes en matière de protection des " -"données." +"L'inspecteur Flower telemetry sans envoyer de métriques d'utilisation " +"anonymes, utilise les deux variables d'environnement :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "Comment nous contacter" + +#: ../../source/ref-telemetry.md:66 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -"**Préférence de l'utilisateur** : En plus de la réglementation, il existe" -" des cas d'utilisation où les utilisateurs s'attendent tout simplement à " -"ce qu'aucune donnée ne quitte leur appareil, jamais. Si tu tapes tes mots" -" de passe et tes informations de carte de crédit sur le clavier numérique" -" de ton téléphone, tu ne t'attends pas à ce que ces mots de passe " -"finissent sur le serveur de l'entreprise qui a développé ce clavier, n" -"'est-ce pas ? En fait, ce cas d'utilisation est la raison pour laquelle " -"l'apprentissage fédéré a été inventé en premier lieu." +"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " +"traitons les mesures d'utilisation anonymes, contacte-nous via " +"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " +"courriel (`telemetry@flower.ai`)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -#, fuzzy +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." 
+"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." msgstr "" -"**volume de données** : certains capteurs, comme les caméras, produisent " -"un volume de données si important qu'il n'est ni possible ni économique " -"de collecter toutes les données (en raison, par exemple, de la bande " -"passante ou de l'efficacité des communications). Pensez à un service " -"ferroviaire national comptant des centaines de gares à travers le pays. " -"Si chacune de ces gares est équipée d'un certain nombre de caméras de " -"sécurité, le volume de données brutes sur les appareils qu'elles " -"produisent nécessite une infrastructure incroyablement puissante et " -"excessivement coûteuse pour les traiter et les stocker. Et la plupart de " -"ces données ne sont même pas utiles." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "" -"Voici quelques exemples où l'apprentissage automatique centralisé ne " -"fonctionne pas :" +#: ../../source/tutorial-quickstart-android.rst:5 +#, fuzzy +msgid "Quickstart Android" +msgstr "Démarrage rapide des Pandas" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#: ../../source/tutorial-quickstart-android.rst:10 #, fuzzy msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -"Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " -"former des modèles de détection du cancer" +"Construisons un système d'apprentissage fédéré en utilisant fastai et " +"Flower !" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/tutorial-quickstart-android.rst:12 +#, fuzzy msgid "" -"Financial information from different organizations to detect financial " -"fraud" +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -"Informations financières provenant de différentes organisations pour " -"détecter les fraudes financières" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" +msgstr "Démarrage rapide fastai" + +#: ../../source/tutorial-quickstart-fastai.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -"Les données de localisation de ta voiture électrique pour mieux prédire " -"l'autonomie" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -"Messages cryptés de bout en bout pour former de meilleurs modèles " -"d'autocomplétion" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -#, fuzzy +#: ../../source/tutorial-quickstart-fastai.rst:20 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -"La popularité des systèmes améliorant la confidentialité comme le " -"navigateur `Brave `__ ou le messager `Signal " -"`__ montre que les utilisateurs se soucient de la " -"confidentialité. En fait, ils choisissent la version améliorant la " -"confidentialité plutôt que d'autres alternatives, si une telle " -"alternative existe. Mais que pouvons-nous faire pour appliquer " -"l'apprentissage automatique et la science des données à ces cas afin " -"d'utiliser les données privées ? Après tout, ce sont tous des domaines " -"qui bénéficieraient de manière significative des récentes avancées en " -"matière d'IA." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "Apprentissage fédéré" +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "et active l'environnement virtuel avec :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/tutorial-quickstart-fastai.rst:43 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" msgstr "" -"L'apprentissage fédéré inverse simplement cette approche. Il permet " -"l'apprentissage automatique sur des données distribuées en déplaçant la " -"formation vers les données, au lieu de déplacer les données vers la " -"formation. 
Voici l'explication en une seule phrase :"

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190
-msgid "Central machine learning: move the data to the computation"
-msgstr "Apprentissage automatique central : déplace les données vers le calcul"
+#: ../../source/tutorial-quickstart-fastai.rst:56
+#: ../../source/tutorial-quickstart-huggingface.rst:65
+#: ../../source/tutorial-quickstart-mlx.rst:64
+#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56
+#: ../../source/tutorial-quickstart-pytorch.rst:64
+#: ../../source/tutorial-quickstart-tensorflow.rst:65
+msgid "With default arguments you will see an output like this one:"
+msgstr ""

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191
-msgid "Federated (machine) learning: move the computation to the data"
-msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données"
+#: ../../source/tutorial-quickstart-fastai.rst:100
+#: ../../source/tutorial-quickstart-huggingface.rst:116
+#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106
+#: ../../source/tutorial-quickstart-pytorch.rst:105
+#: ../../source/tutorial-quickstart-tensorflow.rst:106
+msgid ""
+"You can also override the parameters defined in the "
+"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:"
+msgstr ""

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193
+#: ../../source/tutorial-quickstart-fastai.rst:110
+#, fuzzy
 msgid ""
-"By doing so, it enables us to use machine learning (and other data "
-"science approaches) in areas where it wasn't possible before. We can now "
-"train excellent medical AI models by enabling different hospitals to work"
-" together. We can solve financial fraud by training AI models on the data"
-" of different financial institutions. We can build novel privacy-"
-"enhancing applications (such as secure messaging) that have better built-"
-"in AI than their non-privacy-enhancing alternatives. And those are just a"
-" few of the examples that come to mind. As we deploy federated learning, "
-"we discover more and more areas that can suddenly be reinvented because "
-"they now have access to vast amounts of previously inaccessible data."
+"Check the `source code `_ of this tutorial in ``examples/quickstart-fastai`` "
+"in the Flower GitHub repository."
 msgstr ""
-"Ce faisant, il nous permet d'utiliser l'apprentissage automatique (et "
-"d'autres approches de science des données) dans des domaines où cela "
-"n'était pas possible auparavant. Nous pouvons désormais former "
-"d'excellents modèles d'IA médicale en permettant à différents hôpitaux de"
-" travailler ensemble. Nous pouvons résoudre les fraudes financières en "
-"formant des modèles d'IA sur les données de différentes institutions "
-"financières. Nous pouvons créer de nouvelles applications d'amélioration "
-"de la confidentialité (telles que la messagerie sécurisée) qui ont une "
-"meilleure IA intégrée que leurs alternatives d'amélioration de la "
-"confidentialité. Et ce ne sont là que quelques exemples qui me viennent à"
-" l'esprit. Au fur et à mesure que nous déployons l'apprentissage fédéré, "
-"nous découvrons de plus en plus de domaines qui peuvent soudainement être"
-" réinventés parce qu'ils ont maintenant accès à de vastes quantités de "
-"données auparavant inaccessibles."
+"Félicitations ! Tu as réussi à construire et à faire fonctionner ton "
+"premier système d'apprentissage fédéré. Le code source complet "
+"`_ de cet exemple se trouve dans :code:`examples"
+"/quickstart-mxnet`."

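The strings above describe partitioning the dataset with Flower Datasets' ``IidPartitioner`` before the simulation runs. A minimal sketch of that step might look like this, assuming a hypothetical Hugging Face dataset id, partition count, and local split sizes:

```python
# Minimal sketch (assumption): IID partitioning with Flower Datasets, as
# referenced by the quickstart strings above. The dataset id, the number of
# partitions, and the local train/test split are illustrative only.
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import IidPartitioner

partitioner = IidPartitioner(num_partitions=10)
fds = FederatedDataset(dataset="ylecun/mnist", partitioners={"train": partitioner})

partition = fds.load_partition(partition_id=0)          # data for one ClientApp
partition = partition.train_test_split(test_size=0.2)   # local train/validation split
print(partition["train"].num_rows, partition["test"].num_rows)
```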
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196
+#: ../../source/tutorial-quickstart-huggingface.rst:-1
 msgid ""
-"So how does federated learning work, exactly? Let's start with an "
-"intuitive explanation."
+"Check out this Federated Learning quickstart tutorial for using Flower "
+"with 🤗 HuggingFace Transformers in order to fine-tune an LLM."
 msgstr ""
-"Comment fonctionne l'apprentissage fédéré ? Commençons par une "
-"explication intuitive."

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199
-msgid "Federated learning in five steps"
-msgstr "L'apprentissage fédéré en cinq étapes"
+#: ../../source/tutorial-quickstart-huggingface.rst:5
+msgid "Quickstart 🤗 Transformers"
+msgstr "Démarrage rapide 🤗 Transformateurs"

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202
-msgid "Step 0: Initialize global model"
-msgstr "Étape 0 : Initialisation du modèle global"
+#: ../../source/tutorial-quickstart-huggingface.rst:7
+#, fuzzy
+msgid ""
+"In this federated learning tutorial we will learn how to train a large "
+"language model (LLM) on the `IMDB "
+"`_ dataset using Flower"
+" and the 🤗 Hugging Face Transformers library. It is recommended to create"
+" a virtual environment and run everything within a :doc:`virtualenv "
+"`."
+msgstr ""
+"Tout d'abord, il est recommandé de créer un environnement virtuel et de "
+"tout exécuter au sein d'un `virtualenv `_."

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204
+#: ../../source/tutorial-quickstart-huggingface.rst:14
 msgid ""
-"We start by initializing the model on the server. This is exactly the "
-"same in classic centralized learning: we initialize the model parameters,"
-" either randomly or from a previously saved checkpoint."
+"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face "
+"project. It will generate all the files needed to run, by default with "
+"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_. "
+"The dataset will be partitioned using |flowerdatasets|_'s "
+"|iidpartitioner|_."
 msgstr ""
-"Nous commençons par initialiser le modèle sur le serveur. C'est "
-"exactement la même chose dans l'apprentissage centralisé classique : nous"
-" initialisons les paramètres du modèle, soit de façon aléatoire, soit à "
-"partir d'un point de contrôle précédemment sauvegardé."

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210
-msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|"
+#: ../../source/tutorial-quickstart-huggingface.rst:20
+#: ../../source/tutorial-quickstart-mlx.rst:19
+#: ../../source/tutorial-quickstart-pytorch.rst:19
+#: ../../source/tutorial-quickstart-tensorflow.rst:20
+#, fuzzy
+msgid ""
+"Now that we have a rough idea of what this example is about, let's get "
+"started. First, install Flower in your new environment:"
 msgstr ""
+"Maintenant que nous avons une idée approximative de ce qui se passe, "
+"commençons. Nous devons d'abord installer Flower. Tu peux le faire en "
+"lançant :"

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307
-msgid "Initialize global model"
-msgstr "Initialise le modèle global"
+#: ../../source/tutorial-quickstart-huggingface.rst:28
+msgid ""
+"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -"Étape 1 : envoyer le modèle à un certain nombre d'organisations/appareils" -" connectés (nœuds clients)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -"Ensuite, nous envoyons les paramètres du modèle global aux nœuds clients " -"connectés (par exemple, les appareils périphériques comme les smartphones" -" ou les serveurs appartenant à des organisations). Cela permet de " -"s'assurer que chaque nœud participant commence sa formation locale en " -"utilisant les mêmes paramètres de modèle. Nous n'utilisons souvent que " -"quelques-uns des nœuds connectés au lieu de tous les nœuds. La raison en " -"est que la sélection d'un nombre croissant de nœuds clients a des " -"rendements décroissants." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" +#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "Envoyer le modèle global" +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" +msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/tutorial-quickstart-huggingface.rst:113 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." 
msgstr "" -"Étape 2 : Entraîne le modèle localement sur les données de chaque " -"organisation/appareil (nœud client)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -"Maintenant que tous les nœuds clients (sélectionnés) disposent de la " -"dernière version des paramètres du modèle global, ils commencent " -"l'entraînement local. Ils utilisent leur propre ensemble de données " -"locales pour entraîner leur propre modèle local. Ils n'entraînent pas le " -"modèle jusqu'à la convergence totale, mais ils ne s'entraînent que " -"pendant un petit moment. Il peut s'agir d'une seule époque sur les " -"données locales, ou même de quelques étapes (mini-batchs)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#, fuzzy +msgid "The Data" +msgstr "Chargement des données" + +#: ../../source/tutorial-quickstart-huggingface.rst:132 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "Forme-toi aux données locales" +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#, fuzzy +msgid "The Model" +msgstr "Entraîne le modèle" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "Étape 3 : Renvoyer les mises à jour du modèle au serveur" +#: ../../source/tutorial-quickstart-huggingface.rst:180 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. 
If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" +"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " +"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " +"précisément, nous mettrons au point un modèle Transformer pré-entraîné " +"(distilBERT) pour la classification de séquences sur un ensemble de " +"données d'évaluations IMDB. L'objectif final est de détecter si " +"l'évaluation d'un film est positive ou négative." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/tutorial-quickstart-huggingface.rst:193 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -"Après l'entraînement local, chaque nœud client possède une version " -"légèrement différente des paramètres du modèle qu'il a reçus à l'origine." -" Les paramètres sont tous différents parce que chaque nœud client a des " -"exemples différents dans son ensemble de données local. Les nœuds clients" -" renvoient ensuite ces mises à jour du modèle au serveur. Les mises à " -"jour du modèle qu'ils envoient peuvent être soit les paramètres complets " -"du modèle, soit seulement les gradients qui ont été accumulés au cours de" -" l'entraînement local." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" +#: ../../source/tutorial-quickstart-huggingface.rst:196 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "Envoyer les mises à jour du modèle" +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#, fuzzy +msgid "The ClientApp" +msgstr "client" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" +#: ../../source/tutorial-quickstart-huggingface.rst:241 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. 
Under "
+"the hood, the ``transformers`` library uses PyTorch, which means we can "
+"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in"
+" the :doc:`Quickstart PyTorch ` tutorial. As"
+" a reminder, in ``get_weights()``, PyTorch model parameters are extracted"
+" and represented as a list of NumPy arrays. The ``set_weights()`` "
+"function is the opposite: given a list of NumPy arrays it applies "
+"them to an existing PyTorch model. Doing this is fairly easy in PyTorch."
 msgstr ""
-"Étape 4 : Agréger les mises à jour des modèles dans un nouveau modèle "
-"global"

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264
+#: ../../source/tutorial-quickstart-huggingface.rst:254
+#: ../../source/tutorial-quickstart-pytorch.rst:245
 msgid ""
-"The server receives model updates from the selected client nodes. If it "
-"selected 100 client nodes, it now has 100 slightly different versions of "
-"the original global model, each trained on the local data of one client. "
-"But didn't we want to have one model that contains the learnings from the"
-" data of all 100 client nodes?"
+"The specific implementation of ``get_weights()`` and ``set_weights()`` "
+"depends on the type of models you use. The ones shown below work for a "
+"wide range of PyTorch models but you might need to adjust them if you "
+"have more exotic model architectures."
 msgstr ""
-"Le serveur reçoit les mises à jour du modèle des nœuds clients "
-"sélectionnés. S'il a sélectionné 100 nœuds clients, il dispose maintenant"
-" de 100 versions légèrement différentes du modèle global original, "
-"chacune ayant été formée sur les données locales d'un client. Mais ne "
-"voulions-nous pas avoir un seul modèle qui contienne les apprentissages "
-"des données de l'ensemble des 100 nœuds clients ?"

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266
+#: ../../source/tutorial-quickstart-huggingface.rst:269
+#: ../../source/tutorial-quickstart-pytorch.rst:261
 msgid ""
-"In order to get one single model, we have to combine all the model "
-"updates we received from the client nodes. This process is called "
-"*aggregation*, and there are many different ways to do it. The most basic"
-" way to do it is called *Federated Averaging* (`McMahan et al., 2016 "
-"`__), often abbreviated as *FedAvg*. "
-"*FedAvg* takes the 100 model updates and, as the name suggests, averages "
-"them. To be more precise, it takes the *weighted average* of the model "
-"updates, weighted by the number of examples each client used for "
-"training. The weighting is important to make sure that each data example "
-"has the same \"influence\" on the resulting global model. If one client "
-"has 10 examples, and another client has 100 examples, then - without "
-"weighting - each of the 10 examples would influence the global model ten "
-"times as much as each of the 100 examples."
+"The rest of the functionality is directly inspired by the centralized "
+"case. The ``fit()`` method in the client trains the model using the local"
+" dataset. Similarly, the ``evaluate()`` method is used to evaluate the "
+"model received on a held-out validation set that the client might have:"
 msgstr ""
-"In order to get one single model, we have to combine all the model "
-"updates we received from the client nodes. This process is called "
-"*aggregation*, and there are many different ways to do it. The most basic"
-" way to do it is called *Federated Averaging* (`McMahan et al., 2016 "
-"`__), often abbreviated as *FedAvg*. "
-"*FedAvg* takes the 100 model updates and, as the name suggests, averages "
-"them. To be more precise, it takes the *weighted average* of the model "
-"updates, weighted by the number of examples each client used for "
-"training. The weighting is important to make sure that each data example "
-"has the same \"influence\" on the resulting global model. If one client "
-"has 10 examples, and another client has 100 examples, then - without "
-"weighting - each of the 10 examples would influence the global model ten "
-"times as much as each of the 100 examples."

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273
-msgid "|329fb3c04c744eda83bb51fa444c2266|"
+#: ../../source/tutorial-quickstart-huggingface.rst:296
+msgid ""
+"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` "
+"defined above by means of a ``client_fn()`` callback. Note that the "
+"`context` enables you to get access to hyperparameters defined in your "
+"``pyproject.toml`` to configure the run. In this tutorial we access the "
+"``local-epochs`` setting to control the number of epochs a ``ClientApp`` "
+"will perform when running the ``fit()`` method. You could define "
+"additional hyperparameters in ``pyproject.toml`` and access them here."
 msgstr ""

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315
-msgid "Aggregate model updates"
-msgstr "Mises à jour globales du modèle"
-
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280
-msgid "Step 5: Repeat steps 1 to 4 until the model converges"
-msgstr "Étape 5 : répète les étapes 1 à 4 jusqu'à ce que le modèle converge"
+#: ../../source/tutorial-quickstart-huggingface.rst:330
+#: ../../source/tutorial-quickstart-mlx.rst:376
+#: ../../source/tutorial-quickstart-pytorch.rst:321
+#: ../../source/tutorial-quickstart-tensorflow.rst:245
+#, fuzzy
+msgid "The ServerApp"
+msgstr "serveur"

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282
+#: ../../source/tutorial-quickstart-huggingface.rst:332
 msgid ""
-"Steps 1 to 4 are what we call a single round of federated learning. The "
-"global model parameters get sent to the participating client nodes (step "
-"1), the client nodes train on their local data (step 2), they send their "
-"updated models to the server (step 3), and the server then aggregates the"
-" model updates to get a new version of the global model (step 4)."
+"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an"
+" identical signature to that of ``client_fn()`` but the return type is "
+"|serverappcomponents|_ as opposed to a |client|_. In this example we use "
+"the `FedAvg` strategy. To it we pass a randomly initialized model that "
+"will serve as the global model to be federated. Note that the value of "
+"``fraction_fit`` is read from the run config. You can find the default "
+"value defined in the ``pyproject.toml``."
 msgstr ""
-"Les étapes 1 à 4 constituent ce que nous appelons un cycle unique "
-"d'apprentissage fédéré. Les paramètres du modèle global sont envoyés aux "
-"nœuds clients participants (étape 1), les nœuds clients s'entraînent sur "
-"leurs données locales (étape 2), ils envoient leurs modèles mis à jour au"
-" serveur (étape 3), et le serveur agrège ensuite les mises à jour du "
-"modèle pour obtenir une nouvelle version du modèle global (étape 4)."

-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -#, fuzzy +#: ../../source/tutorial-quickstart-huggingface.rst:371 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -"Au cours d'un seul tour, chaque nœud client qui participe à cette " -"itération ne s'entraîne que pendant un petit moment. Cela signifie " -"qu'après l'étape d'agrégation (étape 4), nous avons un modèle qui a été " -"entraîné sur toutes les données de tous les nœuds clients participants, " -"mais seulement pendant un petit moment. Nous devons ensuite répéter ce " -"processus d'entraînement encore et encore pour finalement arriver à un " -"modèle entièrement entraîné qui fonctionne bien sur l'ensemble des " -"données de tous les nœuds clients." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +#: ../../source/tutorial-quickstart-huggingface.rst:376 msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -"Félicitations, tu comprends maintenant les bases de l'apprentissage " -"fédéré. Il y a bien sûr beaucoup plus à discuter, mais c'était " -"l'apprentissage fédéré en quelques mots. Dans les parties suivantes de ce" -" tutoriel, nous irons plus en détail. Les questions intéressantes " -"comprennent : comment pouvons-nous sélectionner les meilleurs nœuds " -"clients qui devraient participer au prochain tour ? Quelle est la " -"meilleure façon d'agréger les mises à jour du modèle ? Comment pouvons-" -"nous gérer les nœuds clients qui échouent (stragglers) ?" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -#, fuzzy +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -"Tout comme nous pouvons former un modèle sur les données décentralisées " -"de différents nœuds clients, nous pouvons également évaluer le modèle sur" -" ces données pour recevoir des mesures précieuses. 
C'est ce qu'on appelle" -" l'évaluation fédérée, parfois abrégée en FE. En fait, l'évaluation " -"fédérée fait partie intégrante de la plupart des systèmes d'apprentissage" -" fédéré." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "Analyses fédérées" +#: ../../source/tutorial-quickstart-ios.rst:5 +#, fuzzy +msgid "Quickstart iOS" +msgstr "Démarrage rapide XGBoost" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +#: ../../source/tutorial-quickstart-ios.rst:10 +#, fuzzy msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -"Dans de nombreux cas, l'apprentissage automatique n'est pas nécessaire " -"pour tirer de la valeur des données. L'analyse des données peut donner " -"des indications précieuses, mais là encore, il n'y a souvent pas assez de" -" données pour obtenir une réponse claire. Quel est l'âge moyen auquel les" -" gens développent un certain type de problème de santé ? L'analyse " -"fédérée permet de telles requêtes sur plusieurs nœuds clients. Elle est " -"généralement utilisée en conjonction avec d'autres technologies de " -"renforcement de la confidentialité, comme l'agrégation sécurisée, pour " -"empêcher le serveur de voir les résultats soumis par les nœuds clients " -"individuels." +"Dans ce tutoriel, nous allons apprendre, comment former un réseau " +"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +#: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Fleur" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +#: ../../source/tutorial-quickstart-ios.rst:15 +#, fuzzy msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" -"L'apprentissage fédéré, l'évaluation fédérée et l'analyse fédérée " -"nécessitent une infrastructure pour déplacer les modèles d'apprentissage " -"automatique dans les deux sens, les entraîner et les évaluer sur des " -"données locales, puis agréger les modèles mis à jour. Flower fournit " -"l'infrastructure pour faire exactement cela de manière simple, évolutive " -"et sécurisée. En bref, Flower présente une approche unifiée de " -"l'apprentissage, de l'analyse et de l'évaluation fédérés. Il permet à " -"l'utilisateur de fédérer n'importe quelle charge de travail, n'importe " -"quel cadre de ML et n'importe quel langage de programmation." +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#: ../../source/tutorial-quickstart-ios.rst:17 +#, fuzzy +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +#: ../../source/tutorial-quickstart-ios.rst:21 +#, fuzzy msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -"Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, " -"scooter, ordinateur personnel, roomba et téléphone)" +"Maintenant que nous avons une idée générale de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"exécutant :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" 
+#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" msgstr "" -"Félicitations, tu viens d'apprendre les bases de l'apprentissage fédéré " -"et son rapport avec l'apprentissage automatique classique (centralisé) !" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" +msgstr "Client de la fleur" + +#: ../../source/tutorial-quickstart-ios.rst:36 msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -"Dans la prochaine partie de ce tutoriel, nous allons construire un " -"premier système d'apprentissage fédéré avec Flower." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -#, fuzzy +#: ../../source/tutorial-quickstart-ios.rst:72 msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 1 " -"`__ " -"montre comment construire un système d'apprentissage fédéré simple avec " -"PyTorch et Flower." - -#~ msgid "Flower CLI commands" -#~ msgstr "Commandes CLI Flower" + +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:83 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:99 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:102 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:117 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:124 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-scikitlearn.rst:167 +#: ../../source/tutorial-quickstart-xgboost.rst:341 +msgid "Flower Server" +msgstr "Serveur de Flower" + +#: ../../source/tutorial-quickstart-ios.rst:131 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +":code:`server.py`, import Flower and start the server:" +msgstr "" +"Pour les charges de travail simples, nous pouvons démarrer un serveur " +"Flower et laisser toutes les possibilités de configuration à leurs " +"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " +"Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +msgid "Train the model, federated!" +msgstr "Entraîne le modèle, fédéré !" + +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-xgboost.rst:567 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout " +"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " +"généralement un serveur et plusieurs clients. Nous devons donc commencer " +"par démarrer le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:152 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:156 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." 
+msgstr ""
+
+#: ../../source/tutorial-quickstart-jax.rst:5
+msgid "Quickstart JAX"
+msgstr "Démarrage rapide de JAX"
+
+#: ../../source/tutorial-quickstart-jax.rst:10
+#, fuzzy
+msgid ""
+"This tutorial will show you how to use Flower to build a federated "
+"version of an existing JAX workload. We are using JAX to train a linear "
+"regression model on a scikit-learn dataset. We will structure the example"
+" similar to our `PyTorch - From Centralized To Federated "
+"`_ walkthrough. First, we build a centralized "
+"training approach based on the `Linear Regression with JAX "
+"`_"
+" tutorial. Then, we build upon the centralized training code to run the "
+"training in a federated fashion."
+msgstr ""
+"Ce tutoriel te montrera comment utiliser Flower pour construire une "
+"version fédérée d'une charge de travail JAX existante. Nous utilisons JAX"
+" pour entraîner un modèle de régression linéaire sur un ensemble de "
+"données scikit-learn. Nous structurerons l'exemple de la même manière que"
+" notre présentation `PyTorch - De la centralisation à la fédération "
+"`_. Tout d'abord, nous construisons une approche"
+" d'entraînement centralisée basée sur le tutoriel `Régression linéaire "
+"avec JAX "
+"`_."
+" Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour "
+"exécuter l'entraînement de manière fédérée."
+
+#: ../../source/tutorial-quickstart-jax.rst:16
+msgid ""
+"Before we start building our JAX example, we need to install the packages "
+":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:"
+msgstr ""
+"Avant de commencer à construire notre exemple JAX, nous devons installer "
+"les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et "
+":code:`flwr` :"
+
+#: ../../source/tutorial-quickstart-jax.rst:24
+msgid "Linear Regression with JAX"
+msgstr "Régression linéaire avec JAX"
+
+#: ../../source/tutorial-quickstart-jax.rst:26
+msgid ""
+"We begin with a brief description of the centralized training code based "
+"on a :code:`Linear Regression` model. If you want a more in-depth "
+"explanation of what's going on then have a look at the official `JAX "
+"documentation `_."
+msgstr ""
+"Nous commençons par une brève description du code d'entraînement "
+"centralisé basé sur un modèle :code:`Régression linéaire`. Si tu veux une"
+" explication plus approfondie de ce qui se passe, jette un coup d'œil à "
+"la documentation officielle `JAX `_."
+
+#: ../../source/tutorial-quickstart-jax.rst:29
+msgid ""
+"Let's create a new file called :code:`jax_training.py` with all the "
+"components required for a traditional (centralized) linear regression "
+"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to "
+"be imported. In addition, we need to import :code:`sklearn` since we use "
+":code:`make_regression` for the dataset and :code:`train_test_split` to "
+"split the dataset into a training and test set. You can see that we do "
+"not yet import the :code:`flwr` package for federated learning. This will"
+" be done later."
+msgstr ""
+"Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les "
+"composants nécessaires pour un apprentissage traditionnel (centralisé) de"
+" la régression linéaire. Tout d'abord, les paquets JAX :code:`jax` et "
+":code:`jaxlib` doivent être importés. En outre, nous devons importer "
+":code:`sklearn` puisque nous utilisons :code:`make_regression` pour le "
+"jeu de données et :code:`train_test_split` pour diviser le jeu de données"
+" en un jeu d'entraînement et un jeu de test. 
Tu peux voir que nous " +"n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " +"fédéré, ce qui sera fait plus tard." + +#: ../../source/tutorial-quickstart-jax.rst:43 +msgid "" +"The :code:`load_data()` function loads the mentioned training and test " +"sets." +msgstr "" +"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" +" test mentionnés." + +#: ../../source/tutorial-quickstart-jax.rst:53 +msgid "" +"The model architecture (a very simple :code:`Linear Regression` model) is" +" defined in :code:`load_model()`." +msgstr "" +"L'architecture du modèle (un modèle :code:`Régression linéaire` très " +"simple) est définie dans :code:`load_model()`." + +#: ../../source/tutorial-quickstart-jax.rst:65 +msgid "" +"We now need to define the training (function :code:`train()`), which " +"loops over the training set and measures the loss (function " +":code:`loss_fn()`) for each batch of training examples. The loss function" +" is separate since JAX takes derivatives with a :code:`grad()` function " +"(defined in the :code:`main()` function and called in :code:`train()`)." +msgstr "" +"Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," +" qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " +":code:`loss_fn()`) pour chaque lot d'exemples d'entraînement. La fonction" +" de perte est séparée puisque JAX prend des dérivés avec une fonction " +":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " +":code:`train()`)." + +#: ../../source/tutorial-quickstart-jax.rst:83 +msgid "" +"The evaluation of the model is defined in the function " +":code:`evaluation()`. The function takes all test examples and measures " +"the loss of the linear regression model." +msgstr "" +"L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." +" La fonction prend tous les exemples de test et mesure la perte du modèle" +" de régression linéaire." + +#: ../../source/tutorial-quickstart-jax.rst:94 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the :code:`jax.grad()` function is defined in " +":code:`main()` and passed to :code:`train()`." +msgstr "" +"Après avoir défini le chargement des données, l'architecture du modèle, " +"l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " +"notre modèle à l'aide de JAX. Comme nous l'avons déjà mentionné, la " +"fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " +"à :code:`train()`." + +#: ../../source/tutorial-quickstart-jax.rst:111 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "" +"Tu peux maintenant exécuter ta charge de travail (centralisée) de " +"régression linéaire JAX :" + +#: ../../source/tutorial-quickstart-jax.rst:117 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "" +"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " +"déjà utilisé JAX. Passons à l'étape suivante et utilisons ce que nous " +"avons construit pour créer un simple système d'apprentissage fédéré " +"composé d'un serveur et de deux clients." 
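The strings above walk through the centralized ``jax_training.py`` script: ``load_data()``, ``load_model()``, ``loss_fn()``, ``train()``, ``evaluation()``, and a ``main()`` that passes ``jax.grad()`` to ``train()``. A minimal sketch of that structure, with illustrative hyperparameters and dataset size rather than the example's actual values, might look like this:

```python
# Minimal sketch (assumption): the structure of the centralized jax_training.py
# described above. Learning rate, epochs, and dataset size are illustrative only.
import jax
import jax.numpy as jnp
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split


def load_data():
    X, y = make_regression(n_samples=500, n_features=3, random_state=42)
    return train_test_split(X, y, test_size=0.2, random_state=42)


def load_model(n_features):
    key = jax.random.PRNGKey(0)
    return {"w": jax.random.normal(key, (n_features,)), "b": 0.0}


def loss_fn(params, X, y):
    preds = jnp.dot(X, params["w"]) + params["b"]
    return jnp.mean((preds - y) ** 2)  # mean squared error


def train(params, grad_fn, X, y, lr=0.05, epochs=50):
    for _ in range(epochs):
        grads = grad_fn(params, X, y)
        params = {k: params[k] - lr * grads[k] for k in params}
    return params


def evaluation(params, X, y):
    return loss_fn(params, jnp.array(X), jnp.array(y))


def main():
    X_train, X_test, y_train, y_test = load_data()
    params = load_model(X_train.shape[1])
    grad_fn = jax.grad(loss_fn)  # derivatives w.r.t. params, as described above
    params = train(params, grad_fn, X_train, y_train)
    print("Test loss:", float(evaluation(params, X_test, y_test)))


if __name__ == "__main__":
    main()
```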
+ +#: ../../source/tutorial-quickstart-jax.rst:121 +msgid "JAX meets Flower" +msgstr "JAX rencontre Flower" + +#: ../../source/tutorial-quickstart-jax.rst:123 +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +":code:`jax_training.py` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server*, which averages all received " +"parameter updates. This describes one round of the federated learning " +"process, and we repeat this for multiple rounds." +msgstr "" +"Le concept de fédération d'une charge de travail existante est toujours " +"le même et facile à comprendre. Nous devons démarrer un *serveur*, puis " +"utiliser le code dans :code:`jax_training.py` pour les *clients* qui sont" +" connectés au *serveur*.Le *serveur* envoie les paramètres du modèle aux " +"clients.Les *clients* exécutent la formation et mettent à jour les " +"paramètres.Les paramètres mis à jour sont renvoyés au *serveur*, qui fait" +" la moyenne de toutes les mises à jour de paramètres reçues.Ceci décrit " +"un tour du processus d'apprentissage fédéré, et nous répétons cette " +"opération pour plusieurs tours." + +#: ../../source/tutorial-quickstart-jax.rst:145 +msgid "" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined JAX training in :code:`jax_training.py`. Our" +" *client* needs to import :code:`flwr`, but also :code:`jax` and " +":code:`jaxlib` to update the parameters on our JAX model:" +msgstr "" +"Enfin, nous allons définir la logique de notre *client* dans " +":code:`client.py` et nous appuyer sur la formation JAX définie " +"précédemment dans :code:`jax_training.py`. Notre *client* doit importer " +":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" +" les paramètres de notre modèle JAX :" + +#: ../../source/tutorial-quickstart-jax.rst:160 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " +"easier to implement than :code:`Client` if you use a framework with good " +"NumPy interoperability (like JAX) because it avoids some of the " +"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" +" to implement four methods, two methods for getting/setting model " +"parameters, one method for training the model, and one method for testing" +" the model:" +msgstr "" +"L'implémentation d'un *client* Flower signifie essentiellement " +"l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " +":code:`flwr.client.NumPyClient`. Notre implémentation sera basée sur " +":code:`flwr.client.NumPyClient` et nous l'appellerons " +":code:`FlowerClient`. :code:`NumPyClient` est légèrement plus facile à " +"implémenter que :code:`Client` si vous utilisez un framework avec une " +"bonne interopérabilité NumPy (comme JAX) parce qu'il évite une partie du " +"boilerplate qui serait autrement nécessaire. 
:code:`FlowerClient` doit " +"implémenter quatre méthodes, deux méthodes pour obtenir/régler les " +"paramètres du modèle, une méthode pour former le modèle, et une méthode " +"pour tester le modèle :" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid ":code:`set_parameters (optional)`" +msgstr ":code:`set_parameters (optional)`" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid "transform parameters to NumPy :code:`ndarray`'s" +msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" + +#: ../../source/tutorial-quickstart-jax.rst:174 +msgid "get the updated local model parameters and return them to the server" +msgstr "" +"récupère les paramètres du modèle local mis à jour et les renvoie au " +"serveur" + +#: ../../source/tutorial-quickstart-jax.rst:178 +msgid "return the local loss to the server" +msgstr "renvoie la perte locale au serveur" + +#: ../../source/tutorial-quickstart-jax.rst:180 +msgid "" +"The challenging part is to transform the JAX model parameters from " +":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" +" `NumPyClient`." +msgstr "" +"La partie la plus difficile consiste à transformer les paramètres du " +"modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " +"rendre compatibles avec `NumPyClient`." + +#: ../../source/tutorial-quickstart-jax.rst:182 +msgid "" +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`evaluate()` previously " +"defined in :code:`jax_training.py`. So what we really do here is we tell " +"Flower through our :code:`NumPyClient` subclass which of our already " +"defined functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." +msgstr "" +"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " +"utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " +"précédemment dans :code:`jax_training.py`. Ce que nous faisons vraiment " +"ici, c'est que nous indiquons à Flower, par le biais de notre sous-classe" +" :code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " +"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " +"annotations de type pour te donner une meilleure compréhension des types " +"de données qui sont transmis." + +#: ../../source/tutorial-quickstart-jax.rst:251 +msgid "Having defined the federation process, we can run it." +msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." + +#: ../../source/tutorial-quickstart-jax.rst:280 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "" +"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " +"d'exécution avant de le faire) et tu verras que ton projet JAX exécute " +"l'apprentissage fédéré sur deux clients. Félicitations !" + +#: ../../source/tutorial-quickstart-jax.rst:285 +#, fuzzy +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" +"Le code source de cet exemple a été amélioré au fil du temps et peut être" +" trouvé ici : `Quickstart JAX " +"`_. 
" +"Notre exemple est quelque peu simplifié à l'extrême car les deux clients " +"chargent le même jeu de données." + +#: ../../source/tutorial-quickstart-jax.rst:288 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "" +"Tu es maintenant prêt à approfondir ce sujet. Pourquoi ne pas utiliser un" +" modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " +"ne pas ajouter d'autres clients ?" + +#: ../../source/tutorial-quickstart-mlx.rst:5 +#, fuzzy +msgid "Quickstart MLX" +msgstr "Démarrage rapide de JAX" + +#: ../../source/tutorial-quickstart-mlx.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-mlx.rst:12 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:27 +msgid "" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:57 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:106 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:122 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:166 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:190 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:212 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:218 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:231 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:240 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. 
We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:255 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:275 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:285 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:290 +#, fuzzy +msgid "Putting everything together we have:" +msgstr "Tout assembler" + +#: ../../source/tutorial-quickstart-mlx.rst:344 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:378 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:402 +#: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:407 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" + +#: ../../source/tutorial-quickstart-pandas.rst:12 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." 
+msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-pytorch.rst:12 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:27 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:121 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:159 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:184 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:236 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:294 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. 
Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:323 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:365 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 +#, fuzzy +msgid "Video tutorial" +msgstr "Tutoriel" + +#: ../../source/tutorial-quickstart-pytorch.rst:376 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! 
Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +msgid "" +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." +msgstr "" +"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " +"régression logistique` sur MNIST en utilisant Flower et scikit-learn." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" +"Il est recommandé de créer un environnement virtuel et de tout exécuter " +"dans ce `virtualenv `_." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour individuelles des " +"paramètres du modèle en fonction de leurs ensembles de données locales. " +"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " +"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " +"version améliorée du modèle à chaque *client*. Un cycle complet de mises " +"à jour des paramètres s'appelle un *round*." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" +msgstr "" +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"lançant :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" +msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. 
The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" +msgstr "" +"Maintenant que toutes nos dépendances sont installées, exécutons une " +"formation distribuée simple avec deux clients et un serveur. Cependant, " +"avant de configurer le client et le serveur, nous allons définir toutes " +"les fonctionnalités dont nous avons besoin pour notre configuration " +"d'apprentissage fédéré dans :code:`utils.py`. Le :code:`utils.py` " +"contient différentes fonctions définissant toutes les bases de " +"l'apprentissage automatique :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" +msgstr ":code:`get_model_parameters()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "" +"Renvoie les paramètres d'un modèle de régression logistique " +":code:`sklearn`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" +msgstr ":code:`set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +#, fuzzy +msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid ":code:`set_initial_params()`" +msgstr ":code:`set_initial_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "" +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" +msgstr "" +"Tu peux consulter :code:`utils.py` `ici " +"`_ pour plus de détails. Les fonctions prédéfinies sont " +"utilisées dans :code:`client.py` et importées. :code:`client.py` " +"nécessite également d'importer plusieurs paquets tels que Flower et " +"scikit-learn :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +":code:`FederatedDataset.load_partition()` method loads the partitioned " +"training set for each partition ID defined in the :code:`--partition-id` " +"argument." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." +msgstr "" +"Ensuite, le modèle de régression logistique est défini et initialisé avec" +" :code:`utils.set_initial_params()`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." 
+msgstr "" +"Le serveur Flower interagit avec les clients par le biais d'une interface" +" appelée :code:`Client`. Lorsque le serveur sélectionne un client " +"particulier pour la formation, il envoie des instructions de formation " +"sur le réseau. Le client reçoit ces instructions et appelle l'une des " +"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" +" la régression logistique que nous avons définie plus tôt)." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" +"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " +"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" +" de travail utilise scikit-learn. Mettre en œuvre :code:`NumPyClient` " +"signifie généralement définir les méthodes suivantes " +"(:code:`set_parameters` est cependant facultatif) :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid ":code:`set_parameters` (optional)" +msgstr ":code:`set_parameters` (optionnel)" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" +"mettre à jour les poids du modèle local avec les paramètres reçus du " +"serveur" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:122 +msgid "is directly imported with :code:`utils.set_model_params()`" +msgstr "est directement importé avec :code:`utils.set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +msgid "set the local model weights" +msgstr "fixe les poids du modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid "train the local model" +msgstr "entraîne le modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +#, fuzzy +msgid "return the updated local model weights" +msgstr "recevoir les poids du modèle local mis à jour" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid "test the local model" +msgstr "teste le modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "The methods can be implemented in the following way:" +msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +msgid "" +"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" +msgstr "" +"Nous pouvons maintenant créer une instance de notre classe " +":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. 
In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." +msgstr "" +"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" +" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " +"chaîne :code:`\"0.0.0:8080\"` indique au client à quel serveur se " +"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " +"sur la même machine, c'est pourquoi nous utilisons " +":code:`\"0.0.0:8080\"`. Si nous exécutons une charge de travail " +"véritablement fédérée avec le serveur et les clients s'exécutant sur des " +"machines différentes, tout ce qui doit changer est :code:`server_address`" +" que nous transmettons au client." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" +"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " +"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" +" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" +"learn." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:172 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr ":code:`server.py`, importe Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "" +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy. Note that we also make use of Flower" +" Datasets here to load the test split of the MNIST dataset for server-" +"side evaluation." +msgstr "" +"Le nombre de tours d'apprentissage fédéré est défini dans " +":code:`fit_round()` et l'évaluation est définie dans " +":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " +"chaque tour d'apprentissage fédéré et te donne des informations sur la " +"perte et la précision." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +msgid "" +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +msgstr "" +"Le :code:`main` contient l'initialisation des paramètres côté serveur " +":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " +":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " +"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" +" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" +" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
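The entries above describe the scikit-learn server script in prose. The following is a minimal sketch of that flow, with a zero-initialisation helper standing in for the tutorial's :code:`utils.set_initial_params()` and the server-side evaluation function omitted for brevity; apart from the calls quoted in the text, the helper names and shapes below are assumptions, not the tutorial's exact code::

    import flwr as fl
    import numpy as np
    from sklearn.linear_model import LogisticRegression


    def set_initial_params(model: LogisticRegression) -> None:
        """Zero-initialise the parameters the Flower server will ask for (MNIST: 10 classes, 28*28 features)."""
        model.classes_ = np.arange(10)
        model.coef_ = np.zeros((10, 28 * 28))
        model.intercept_ = np.zeros(10)


    def fit_round(server_round: int) -> dict:
        """Send the current round number to the clients."""
        return {"server_round": server_round}


    # Server-side parameter initialisation, as described above.
    model = LogisticRegression()
    set_initial_params(model)

    # Default strategy: federated averaging (FedAvg) with two clients and a config function per round.
    strategy = fl.server.strategy.FedAvg(
        min_available_clients=2,
        on_fit_config_fn=fit_round,
    )

    fl.server.start_server(
        server_address="0.0.0.0:8080",
        strategy=strategy,
        config=fl.server.ServerConfig(num_rounds=3),
    )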
+ +#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " +"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " +"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" +" commencer par lancer le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:239 +#: ../../source/tutorial-quickstart-xgboost.rst:575 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" +"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " +"dans différents terminaux. Ouvre un nouveau terminal et démarre le " +"premier client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:246 +#: ../../source/tutorial-quickstart-xgboost.rst:582 +msgid "Open another terminal and start the second client:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:252 +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" +"Chaque client aura son propre ensemble de données. Tu devrais maintenant " +"voir comment la formation se déroule dans le tout premier terminal (celui" +" qui a démarré le serveur) :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" +"mnist`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/tutorial-quickstart-tensorflow.rst:7 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:28 +msgid "" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:118 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:147 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:178 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:212 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:247 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:284 +#, fuzzy +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:299 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:14 +#, fuzzy +msgid "Federated XGBoost" +msgstr "Formation fédérée" + +#: ../../source/tutorial-quickstart-xgboost.rst:16 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:20 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:23 +#, fuzzy +msgid "Why federated XGBoost?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:39 +#, fuzzy +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-xgboost.rst:41 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:47 +#, fuzzy +msgid "" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" +msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:60 +#, fuzzy +msgid "" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés à PyTorch :" + +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:89 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:102 +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=30)`). Then, we load " +"the partition for the given client based on :code:`partition_id`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:121 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:134 +msgid "" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:183 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:205 +msgid "" +"All required parameters defined above are passed to :code:`XgbClient`'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:207 +msgid "" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:262 +msgid "" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. From the second round, we load the global " +"model sent from server to new build Booster object, and then update model" +" weights on local training data with function :code:`local_boost` as " +"follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:281 +msgid "" +"Given :code:`num_local_round`, we update trees by calling " +":code:`bst_input.update` method. 
After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:313 +msgid "" +"In :code:`evaluate`, after loading the global model, we call " +":code:`bst.eval_set` function to conduct evaluation on valid set. The AUC" +" value will be returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:316 +#, fuzzy +msgid "" +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" +msgstr "" +"Nous pouvons maintenant créer une instance de notre classe " +":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" + +#: ../../source/tutorial-quickstart-xgboost.rst:332 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement :code:`Client` and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." +msgstr "" +"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" +" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " +"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " +"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " +"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " +":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" +" avec le serveur et les clients fonctionnant sur des machines " +"différentes, tout ce qui doit changer est l'adresse " +":code:`server_address` vers laquelle nous dirigeons le client." + +#: ../../source/tutorial-quickstart-xgboost.rst:343 +#, fuzzy +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-xgboost.rst:346 +#, fuzzy +msgid "" +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés au MXNet :" + +#: ../../source/tutorial-quickstart-xgboost.rst:348 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:380 +msgid "" +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients. The :code:`config_func` " +"function is to return the current FL round number to client's " +":code:`fit()` and :code:`evaluate()` methods." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:384 +#, fuzzy +msgid "Then, we start the server:" +msgstr "Démarrer le serveur" + +#: ../../source/tutorial-quickstart-xgboost.rst:396 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:398 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:400 +msgid "" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:496 +msgid "" +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:555 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:560 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:565 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:641 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:646 +#, fuzzy +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-xgboost.rst:650 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:652 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:659 +#, fuzzy +msgid "Cyclic training" +msgstr "Formation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:661 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. 
Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:665 +msgid "" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:705 +msgid "" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:746 +msgid "" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:749 +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:813 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:815 +msgid "" +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:846 +#, fuzzy +msgid "Customised centralised/distributed evaluation" +msgstr "Évaluation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:848 +msgid "" +"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:880 +msgid "" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:883 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:887 +#, fuzzy +msgid "Flower simulation" +msgstr "Simulation de moniteur" + +#: ../../source/tutorial-quickstart-xgboost.rst:888 +msgid "" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:922 +msgid "" +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:977 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:980 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1031 +msgid "" +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1051 +msgid "" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1094 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1096 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. Let's first see the sever side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1142 +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1146 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1200 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1204 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1282 +msgid "This integrates all arguments for both client and server sides." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1285 +#, fuzzy +msgid "Example commands" +msgstr "Exemples de PyTorch" + +#: ../../source/tutorial-quickstart-xgboost.rst:1287 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1294 +#, fuzzy +msgid "Then, on each client terminal, we start the clients:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-xgboost.rst:1300 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1306 +#, fuzzy +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." +msgstr "" +"Félicitations ! 
Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +#, fuzzy +msgid "Build a strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" +" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__) " +"et nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et sur les clients " +"(`partie 2 `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" +"Dans ce carnet, nous allons continuer à personnaliser le système " +"d'apprentissage fédéré que nous avons construit précédemment en créant " +"une version personnalisée de FedAvg (encore une fois, en utilisant " +"`Flower `__ et `PyTorch `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#, fuzzy +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" 
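As a rough preview of the strategy customisation the following entries walk through: the sketch below subclasses ``FedAvg`` and only overrides ``configure_fit`` so that half of the sampled clients receive a higher learning rate, whereas the notebook builds the full ``Strategy`` from scratch. The ``FedCustom`` name follows the notebook; the ``lr`` key and its values are illustrative assumptions::

    from typing import List, Tuple

    import flwr as fl
    from flwr.common import FitIns, Parameters
    from flwr.server.client_manager import ClientManager
    from flwr.server.client_proxy import ClientProxy


    class FedCustom(fl.server.strategy.FedAvg):
        """FedAvg variant that sends a higher learning rate to half of the sampled clients."""

        def configure_fit(
            self, server_round: int, parameters: Parameters, client_manager: ClientManager
        ) -> List[Tuple[ClientProxy, FitIns]]:
            # Keep FedAvg's client sampling as-is, then adjust each client's config dict
            # (one of the FitIns attributes) before the instructions are sent out.
            fit_configurations = super().configure_fit(server_round, parameters, client_manager)
            half = len(fit_configurations) // 2
            for idx, (_, fit_ins) in enumerate(fit_configurations):
                fit_ins.config["lr"] = 0.003 if idx < half else 0.001
            return fit_configurations


    # Usage: pass an instance in place of FedAvg when starting the experiment.
    strategy = FedCustom(fraction_fit=0.3, fraction_evaluate=0.3)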
+ +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "Préparation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "" +"Avant de commencer le code proprement dit, assurons-nous que nous " +"disposons de tout ce dont nous avons besoin." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "Installation des dépendances" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "Tout d'abord, nous installons les paquets nécessaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" +"Maintenant que toutes les dépendances sont installées, nous pouvons " +"importer tout ce dont nous avons besoin pour ce tutoriel :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"Il est possible de passer à un runtime dont l'accélération GPU est " +"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " +"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " +"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." 
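For reference, the device selection the note above refers to is usually a single line; the snippet below is a generic PyTorch sketch rather than the notebook's exact cell::

    import torch

    # Use GPU acceleration when the runtime provides it, otherwise fall back to CPU.
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Training on {DEVICE}")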
+ +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "Chargement des données" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation) et enveloppons le tout dans " +"leur propre ``DataLoader``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "Formation/évaluation du modèle" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" +"Continuons avec la définition habituelle du modèle (y compris " +"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " +"et de test :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Client de Flower" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#, fuzzy +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." +msgstr "" +"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" +"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " +"méthodes ``get_parameters``, ``fit`` et ``evaluate``. Ici, nous " +"transmettons également le ``cid`` au client et l'utilisons pour consigner" +" des détails supplémentaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. 
We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" +"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " +"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " +"hyperparamètres) à l'optimiseur d'une fraction des clients. Nous " +"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " +"changerons ensuite le dictionnaire de configuration (l'un des attributs " +"``FitIns``)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" +"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " +"créée ``FedCustom`` lors du démarrage de l'expérience :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "Récapitulation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." +msgstr "" +"Dans ce carnet, nous avons vu comment mettre en place une stratégie " +"personnalisée. Une stratégie personnalisée permet un contrôle granulaire " +"sur la configuration des nœuds clients, l'agrégation des résultats, et " +"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " +"d'écraser les méthodes abstraites de la classe de base (abstraite) " +"``Strategy``. Pour rendre les stratégies personnalisées encore plus " +"puissantes, tu peux passer des fonctions personnalisées au constructeur " +"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " +"chaque fois que c'est nécessaire." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +#, fuzzy +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" +"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " +"Slack : `Join Slack `__" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" 
+msgstr ""
+"Il existe un canal ``#questions`` dédié si tu as besoin d'aide, mais nous"
+" aimerions aussi savoir qui tu es dans ``#introductions`` !"
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579
+#, fuzzy
+msgid ""
+"The `Flower Federated Learning Tutorial - Part 4 "
+"`__ introduces ``Client``, the flexible API underlying "
+"``NumPyClient``."
+msgstr ""
+"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 "
+"`__ présente ``Client``, l'API flexible qui sous-tend "
+"``NumPyClient``."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9
+#, fuzzy
+msgid "Customize the client"
+msgstr "Personnaliser le client"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11
+#, fuzzy
+msgid ""
+"Welcome to the fourth part of the Flower federated learning tutorial. In "
+"the previous parts of this tutorial, we introduced federated learning "
+"with PyTorch and Flower (`part 1 `__), we learned how "
+"strategies can be used to customize the execution on both the server and "
+"the clients (`part 2 `__), and we built our own "
+"custom strategy from scratch (`part 3 `__)."
+msgstr ""
+"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré"
+" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté"
+" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 "
+"`__), "
+"nous avons appris comment les stratégies peuvent être utilisées pour "
+"personnaliser l'exécution à la fois sur le serveur et les clients "
+"(`partie 2 `__), et nous avons construit notre propre stratégie "
+"personnalisée à partir de zéro (`partie 3 `__)."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14
+msgid ""
+"In this notebook, we revisit ``NumPyClient`` and introduce a new "
+"baseclass for building clients, simply named ``Client``. In previous "
+"parts of this tutorial, we've based our client on ``NumPyClient``, a "
+"convenience class which makes it easy to work with machine learning "
+"libraries that have good NumPy interoperability. With ``Client``, we gain"
+" a lot of flexibility that we didn't have before, but we'll also have to "
+"do a few things the we didn't have to do before."
+msgstr ""
+"Dans ce carnet, nous revisitons ``NumPyClient`` et introduisons une "
+"nouvelle classe de base pour construire des clients, simplement appelée "
+"``Client``. Dans les parties précédentes de ce tutoriel, nous avons basé "
+"notre client sur ``NumPyClient``, une classe de commodité qui facilite le"
+" travail avec les bibliothèques d'apprentissage automatique qui ont une "
+"bonne interopérabilité NumPy. Avec ``Client``, nous gagnons beaucoup de "
+"flexibilité que nous n'avions pas auparavant, mais nous devrons également"
+" faire quelques choses que nous n'avions pas à faire auparavant."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19
+#, fuzzy
+msgid ""
+"Let's go deeper and see what it takes to move from ``NumPyClient`` to "
+"``Client``! 🌼"
+msgstr ""
+"Allons plus loin et voyons ce qu'il faut faire pour passer de "
+"``NumPyClient`` à ``Client`` ! 🌼"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30
+msgid "Step 0: Preparation"
+msgstr "Étape 0 : Préparation"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121
+#, fuzzy
+msgid ""
+"Let's now define a loading function for the CIFAR-10 training and test "
+"set, partition them into ``num_partitions`` smaller datasets (each split "
+"into training and validation set), and wrap everything in their own "
+"``DataLoader``."
+msgstr ""
+"Définissons maintenant une fonction de chargement pour les ensembles "
+"d'entraînement et de test CIFAR-10, qui les divise en ``num_partitions`` "
+"ensembles de données plus petits (chacun divisé en ensemble "
+"d'entraînement et de validation) et enveloppe le tout dans leur propre "
+"``DataLoader``."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256
+msgid "Step 1: Revisiting NumPyClient"
+msgstr "Étape 1 : Revoir NumPyClient"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258
+#, fuzzy
+msgid ""
+"So far, we've implemented our client by subclassing "
+"``flwr.client.NumPyClient``. The three methods we implemented are "
+"``get_parameters``, ``fit``, and ``evaluate``."
+msgstr ""
+"Jusqu'à présent, nous avons implémenté notre client en sous-classant "
+"``flwr.client.NumPyClient``. Les trois méthodes que nous avons "
+"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299
+msgid ""
+"Then, we define the function ``numpyclient_fn`` that is used by Flower to"
+" create the ``FlowerNumpyClient`` instances on demand. Finally, we create"
+" the ``ClientApp`` and pass the ``numpyclient_fn`` to it."
+msgstr ""
+"Ensuite, nous définissons la fonction ``numpyclient_fn`` que Flower "
+"utilise pour créer les instances de ``FlowerNumpyClient`` à la demande. "
+"Enfin, nous créons la ``ClientApp`` et lui transmettons "
+"``numpyclient_fn``."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328
+#, fuzzy
+msgid ""
+"We've seen this before, there's nothing new so far. The only *tiny* "
+"difference compared to the previous notebook is naming, we've changed "
+"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to "
+"``numpyclient_fn``. Next, we configure the number of federated learning "
+"rounds using ``ServerConfig`` and create the ``ServerApp`` with this "
+"config:"
+msgstr ""
+"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à "
+"présent. La seule *petite* différence par rapport au carnet précédent est"
+" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` "
+"et ``client_fn`` en ``numpyclient_fn``. Ensuite, nous configurons le "
+"nombre de tours d'apprentissage fédéré à l'aide de ``ServerConfig`` et "
+"créons la ``ServerApp`` avec cette configuration :"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355
+msgid ""
+"Finally, we specify the resources for each client and run the simulation "
+"to see the output we get:"
+msgstr ""
+"Enfin, nous spécifions les ressources de chaque client et exécutons la "
+"simulation pour voir la sortie que nous obtenons :"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389
+#, fuzzy
+msgid ""
+"This works as expected, ten clients are training for three rounds of "
+"federated learning."
+msgstr ""
+"Cela fonctionne comme prévu, dix clients s'entraînent pour trois tours "
+"d'apprentissage fédéré."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391
+#, fuzzy
+msgid ""
+"Let's dive a little bit deeper and discuss how Flower executes this "
+"simulation. 
Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" +"Plongeons un peu plus profondément et discutons de la façon dont Flower " +"exécute cette simulation. Chaque fois qu'un client est sélectionné pour " +"effectuer un travail, ``start_simulation`` appelle la fonction " +"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" +" (en même temps qu'il charge le modèle et les données)." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." +msgstr "" +"Mais voici la partie la plus surprenante : Flower n'utilise pas " +"directement l'objet `FlowerNumPyClient`. Au lieu de cela, il enveloppe " +"l'objet pour le faire ressembler à une sous-classe de " +"`flwr.client.Client`, et non de `flwr.client.NumPyClient`. En fait, le " +"noyau de Flower ne sait pas comment gérer les `NumPyClient`, il sait " +"seulement comment gérer les `Client`. `NumPyClient` est juste une " +"abstraction de commodité construite au dessus de `Client`." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "" +"Au lieu de construire par-dessus `NumPyClient``, nous pouvons construire " +"directement par-dessus `Client``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "" +"Essayons de faire la même chose en utilisant ``Client`` au lieu de " +"``NumPyClient``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "" +"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous " +"devons nous assurer que notre nouveau client basé sur le ``Client`` " +"fonctionne, n'est-ce pas ?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "" +"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement " +"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la " +"différence ?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. 
For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." +msgstr "" +"La seule *vraie* différence entre Client et NumPyClient est que " +"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " +"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " +"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " +"Cela permet de travailler avec des bibliothèques d'apprentissage " +"automatique qui ont une bonne prise en charge de NumPy (la plupart " +"d'entre elles) en un clin d'œil." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" +msgstr "Étape 3 : Sérialisation personnalisée" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "" +"Nous allons ici explorer comment mettre en œuvre une sérialisation " +"personnalisée à l'aide d'un exemple simple." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" +"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " +"simplement le processus de conversion d'un objet en octets bruts, et tout" +" aussi important, la désérialisation est le processus de reconversion des" +" octets bruts en objet. Ceci est très utile pour la communication réseau." +" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " +"objet Python par Internet." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." +msgstr "" +"L'apprentissage fédéré s'appuie fortement sur la communication Internet " +"pour la formation en envoyant des objets Python dans les deux sens entre " +"les clients et le serveur, ce qui signifie que la sérialisation est un " +"élément essentiel de l'apprentissage fédéré." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" +"Dans la section suivante, nous allons écrire un exemple de base où, au " +"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " +"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " +"éparses, avant de les envoyer. Cette technique peut être utilisée pour " +"économiser de la bande passante, car dans certains cas où les poids d'un " +"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " +"une matrice éparse peut grandement améliorer leur taille en octets." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" +msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" +"C'est là que la véritable sérialisation/désérialisation se produira, en " +"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " +"``sparse_bytes_to_ndarray`` pour la désérialisation." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." +msgstr "" +"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " +"convertir nos tableaux." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "Côté client" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." +msgstr "" +"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " +"suffira d'appeler nos fonctions personnalisées dans notre " +"``flwr.client.Client``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." +msgstr "" +"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " +"que nous avons obtenus de notre réseau en utilisant nos " +"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" +"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " +"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " +"personnalisé, puis nous devons sérialiser nos résultats locaux avec " +"``ndarrays_to_sparse_parameters``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "" +"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " +"paramètres globaux avec notre fonction personnalisée." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" +msgstr "Côté serveur" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" +"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." 
+" Pour modifier la sérialisation et la désérialisation ici, il suffit de " +"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " +"``FedAvg``. Les autres fonctions de la stratégie seront héritées de la " +"super-classe ``FedAvg``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "" +"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque " +"résultat que nous avons reçu :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "Puis sérialise le résultat agrégé :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "" +"Nous pouvons maintenant exécuter notre exemple de sérialisation " +"personnalisée !" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" +"Dans cette partie du tutoriel, nous avons vu comment construire des " +"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " +"``NumPyClient`` est une abstraction de commodité qui facilite le travail " +"avec les bibliothèques d'apprentissage automatique qui ont une bonne " +"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " +"nous permet de faire des choses qui ne sont pas possibles dans " +"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " +"sérialisation et la désérialisation des paramètres." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" +"C'est la dernière partie du tutoriel Flower (pour l'instant !), " +"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " +"la documentation. 
Il y a de nombreux sujets que nous n'avons pas abordés "
+"dans le tutoriel, nous te recommandons les ressources suivantes :"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020
+msgid "`Read Flower Docs `__"
+msgstr "`Lire la documentation Flower `__"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021
+#, fuzzy
+msgid "`Check out Flower Code Examples `__"
+msgstr ""
+"`Découvre les exemples de code Flower "
+"`__"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022
+#, fuzzy
+msgid ""
+"`Use Flower Baselines for your research "
+"`__"
+msgstr ""
+"`Utilise Flower Baselines pour ta recherche "
+"`__"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023
+#, fuzzy
+msgid ""
+"`Watch Flower AI Summit 2024 videos `__"
+msgstr ""
+"`Regarde les vidéos du Flower AI Summit 2024 `__"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9
+msgid "Get started with Flower"
+msgstr "Premiers pas avec Flower"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11
+msgid "Welcome to the Flower federated learning tutorial!"
+msgstr "Bienvenue dans le tutoriel sur l'apprentissage fédéré Flower !"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13
+#, fuzzy
+msgid ""
+"In this notebook, we'll build a federated learning system using the "
+"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch "
+"for the model training pipeline and data loading. In part 2, we federate "
+"the PyTorch project using Flower."
+msgstr ""
+"Dans ce carnet, nous allons construire un système d'apprentissage fédéré "
+"en utilisant le framework Flower, Flower Datasets et PyTorch. Dans la "
+"première partie, nous utilisons PyTorch pour le pipeline d'entraînement "
+"du modèle et le chargement des données. Dans la deuxième partie, nous "
+"fédérons le projet PyTorch à l'aide de Flower."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18
+#, fuzzy
+msgid "Let's get started! 🌼"
+msgstr "C'est parti ! 🌼"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32
+msgid ""
+"Before we begin with any actual code, let's make sure that we have "
+"everything we need."
+msgstr ""
+"Avant de commencer à coder, assurons-nous que nous disposons de tout ce "
+"dont nous avons besoin."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44
+#, fuzzy
+msgid "Install dependencies"
+msgstr "Installation des dépendances"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46
+#, fuzzy
+msgid ""
+"Next, we install the necessary packages for PyTorch (``torch`` and "
+"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower "
+"(``flwr``):"
+msgstr ""
+"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` "
+"et ``torchvision``), Flower Datasets (``flwr-datasets``) et Flower "
+"(``flwr``) :"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109
+#, fuzzy
+msgid ""
+"It is possible to switch to a runtime that has GPU acceleration enabled "
+"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:"
+" GPU > Save``). Note, however, that Google Colab is not always able to "
+"offer GPU acceleration. If you see an error related to GPU availability "
+"in one of the following sections, consider switching back to CPU-based "
+"execution by setting ``DEVICE = torch.device(\"cpu\")``. 
If the runtime "
+"has GPU acceleration enabled, you should see the output ``Training on "
+"cuda``, otherwise it'll say ``Training on cpu``."
+msgstr ""
+"Il est possible de passer à un runtime dont l'accélération GPU est "
+"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware "
+"accelerator : GPU > Save``). Note cependant que Google Colab n'est pas "
+"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur "
+"liée à la disponibilité du GPU dans l'une des sections suivantes, "
+"envisage de repasser à une exécution basée sur le CPU en définissant "
+"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération"
+" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon "
+"il dira ``Training on cpu``."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122
+#, fuzzy
+msgid "Load the data"
+msgstr "Chargement des données"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124
+#, fuzzy
+msgid ""
+"Federated learning can be applied to many different types of tasks across"
+" different domains. In this tutorial, we introduce federated learning by "
+"training a simple convolutional neural network (CNN) on the popular "
+"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that "
+"distinguish between images from ten different classes: 'airplane', "
+"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and "
+"'truck'."
+msgstr ""
+"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches "
+"dans différents domaines. Dans ce tutoriel, nous présentons "
+"l'apprentissage fédéré en formant un simple réseau neuronal "
+"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. "
+"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui "
+"font la distinction entre les images de dix classes différentes : "
+"'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', "
+"'ship' et 'truck'."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135
+#, fuzzy
+msgid ""
+"We simulate having multiple datasets from multiple organizations (also "
+"called the \"cross-silo\" setting in federated learning) by splitting the"
+" original CIFAR-10 dataset into multiple partitions. Each partition will "
+"represent the data from a single organization. We're doing this purely "
+"for experimentation purposes, in the real world there's no need for data "
+"splitting because each organization already has their own data (the data "
+"is naturally partitioned)."
+msgstr ""
+"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de"
+" plusieurs organisations (également appelé le scénario \"cross-silo\" "
+"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 "
+"original en plusieurs partitions. Chaque partition représentera les "
+"données d'une seule organisation. Nous faisons cela purement à des fins "
+"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les"
+" données parce que chaque organisation a déjà ses propres données (les "
+"données sont donc naturellement partitionnées)."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137
+#, fuzzy
+msgid ""
+"Each organization will act as a client in the federated learning system. "
+"Having ten organizations participate in a federation means having ten "
+"clients connected to the federated learning server."
+msgstr ""
+"Chaque organisation agira comme un client dans le système d'apprentissage"
+" fédéré. 
Ainsi, le fait que dix organisations participent à une "
+"fédération signifie que dix clients sont connectés au serveur "
+"d'apprentissage fédéré."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148
+msgid ""
+"We use the Flower Datasets library (``flwr-datasets``) to partition "
+"CIFAR-10 into ten partitions using ``FederatedDataset``. We will create a"
+" small training and test set for each of the ten organizations and wrap "
+"each of these into a PyTorch ``DataLoader``:"
+msgstr ""
+"Nous utilisons la bibliothèque Flower Datasets (``flwr-datasets``) pour "
+"partitionner CIFAR-10 en dix partitions à l'aide de ``FederatedDataset``."
+" Nous créerons un petit ensemble d'entraînement et de test pour chacune "
+"des dix organisations et envelopperons chacun d'eux dans un "
+"``DataLoader`` PyTorch :"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196
+#, fuzzy
+msgid ""
+"We now have a function that can return a training set and validation set "
+"(``trainloader`` and ``valloader``) representing one dataset from one of "
+"ten different organizations. Each ``trainloader``/``valloader`` pair "
+"contains 4000 training examples and 1000 validation examples. There's "
+"also a single ``testloader`` (we did not split the test set). Again, this"
+" is only necessary for building research or educational systems, actual "
+"federated learning systems have their data naturally distributed across "
+"multiple partitions."
+msgstr ""
+"Nous avons maintenant une fonction qui peut renvoyer un ensemble "
+"d'entraînement et un ensemble de validation (``trainloader`` et "
+"``valloader``) représentant les données de l'une des dix organisations. "
+"Chaque paire ``trainloader``/``valloader`` contient 4000 exemples "
+"d'entraînement et 1000 exemples de validation. Il y a également un seul "
+"``testloader`` (nous n'avons pas divisé l'ensemble de test). Encore une "
+"fois, cela n'est nécessaire que pour construire des systèmes de recherche"
+" ou d'éducation, les systèmes d'apprentissage fédéré réels ont leurs "
+"données naturellement distribuées à travers plusieurs partitions."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199
+#, fuzzy
+msgid ""
+"Let's take a look at the first batch of images and labels in the first "
+"training set (i.e., ``trainloader`` from ``partition_id=0``) before we "
+"move on:"
+msgstr ""
+"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier "
+"ensemble d'entraînement (c'est-à-dire le ``trainloader`` de "
+"``partition_id=0``) avant de poursuivre :"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241
+#, fuzzy
+msgid ""
+"The output above shows a random batch of images from the ``trainloader`` "
+"from the first of ten partitions. It also prints the labels associated "
+"with each image (i.e., one of the ten possible labels we've seen above). "
+"If you run the cell again, you should see another batch of images."
+msgstr ""
+"La sortie ci-dessus montre un lot aléatoire d'images provenant du "
+"``trainloader`` de la première des dix partitions. Elle imprime également"
+" les étiquettes associées à chaque image (c'est-à-dire l'une des dix "
+"étiquettes possibles que nous avons vues ci-dessus). Si tu exécutes à "
+"nouveau la cellule, tu devrais voir un autre lot d'images."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253
+msgid "Step 1: Centralized Training with PyTorch"
+msgstr "Étape 1 : Formation centralisée avec PyTorch"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264
+msgid ""
+"Next, we're going to use PyTorch to define a simple convolutional neural "
+"network. This introduction assumes basic familiarity with PyTorch, so it "
+"doesn't cover the PyTorch-related aspects in full detail. 
If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" +"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " +"neuronal convolutif. Cette introduction suppose une familiarité de base " +"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " +"PyTorch. Si tu veux plonger plus profondément dans PyTorch, nous te " +"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " +"`__." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#, fuzzy +msgid "Define the model" +msgstr "Définir le modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" +"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " +"`__ :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "Entraîne le modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +#, fuzzy +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "" +"Nous avons maintenant tous les éléments de base dont nous avons besoin : " +"un ensemble de données, un modèle, une fonction d'entraînement et une " +"fonction de test. Assemblons-les pour entraîner le modèle sur l'ensemble " +"de données de l'une de nos organisations (``trainloaders[0]``). Cela " +"simule la réalité de la plupart des projets d'apprentissage automatique " +"aujourd'hui : chaque organisation possède ses propres données et entraîne" +" les modèles uniquement sur ces données internes :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#, fuzzy +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" +"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " +"époques devrait se traduire par une précision de l'ensemble de test " +"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " +"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " +"juste de montrer un pipeline d'entraînement centralisé simpliste qui " +"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "Étape 2 : Apprentissage fédéré avec Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. 
All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" +"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" +" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" +" un seul ``valloader``). Ensuite, nous allons simuler une situation où " +"nous avons plusieurs ensembles de données dans plusieurs organisations et" +" où nous formons un modèle sur ces organisations à l'aide de " +"l'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "Mise à jour des paramètres du modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#, fuzzy +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "" +"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " +"global au client, et le client met à jour le modèle local avec les " +"paramètres reçus du serveur. Il entraîne ensuite le modèle sur les " +"données locales (ce qui modifie les paramètres du modèle localement) et " +"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " +"alternativement, il renvoie seulement les gradients au serveur, et non " +"pas les paramètres complets du modèle)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" +"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " +"local avec les paramètres reçus du serveur et pour obtenir les paramètres" +" mis à jour du modèle local : ``set_parameters`` et ``get_parameters``. " +"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " +"ci-dessus." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#, fuzzy +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" +"Les détails de ce fonctionnement ne sont pas vraiment importants ici " +"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " +"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " +"tenseurs de paramètres du modèle PyTorch. 
Les tenseurs de paramètres sont" +" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " +"sérialiser/désérialiser) :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Client de Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#, fuzzy +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" +"Ceci étant dit, passons à la partie intéressante. Les systèmes " +"d'apprentissage fédérés se composent d'un serveur et de plusieurs " +"clients. Dans Flower, nous créons des clients en mettant en œuvre des " +"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." +" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " +"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " +"de chaudière." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#, fuzzy +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" +"Ceci étant dit, passons à la partie intéressante. Les systèmes " +"d'apprentissage fédérés se composent d'un serveur et de plusieurs " +"clients. Dans Flower, nous créons des clients en mettant en œuvre des " +"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." +" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " +"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " +"de chaudière." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#, fuzzy +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "" +"``fit`` : reçoit les paramètres du modèle du serveur, entraîne les " +"paramètres du modèle sur les données locales et renvoie les paramètres du" +" modèle (mis à jour) au serveur" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#, fuzzy +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "" +"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " +"paramètres du modèle sur les données locales et renvoie le résultat de " +"l'évaluation au serveur" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. 
Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" +"Nous avons mentionné que nos clients utiliseront les composants PyTorch " +"définis précédemment pour la formation et l'évaluation des modèles. " +"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#, fuzzy +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#, fuzzy +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" +"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " +"avec 10 clients sur une seule machine. Cela signifie que le serveur et " +"les 10 clients vivront sur une seule machine et partageront des " +"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " +"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. Faire " +"cela sur une seule machine peut rapidement épuiser les ressources mémoire" +" disponibles, même si seulement un sous-ensemble de ces clients participe" +" à un seul tour d'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +#, fuzzy +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. 
To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +#, fuzzy +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" +"Nous avons maintenant la classe ``FlowerClient`` qui définit " +"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " +"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " +"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. La " +"dernière étape consiste à démarrer la simulation réelle en utilisant " +"``flwr.simulation.start_simulation``." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Serveur de Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +#, fuzzy +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. 
For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" +"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " +"pouvons également utiliser nos propres implémentations de stratégies pour" +" personnaliser presque tous les aspects de l'approche de l'apprentissage " +"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " +"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " +"base. La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " +"deviné - démarre la simulation :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +#, fuzzy +msgid "Run the training" +msgstr "Commencer la formation" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "Dans les coulisses" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "" +"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " +"simulation ?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, fuzzy, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" +"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " +"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " +"``FedAvg`` de sélectionner des clients. 
``FedAvg`` sait qu'il doit "
+"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors "
+"il choisit 10 clients au hasard (c'est-à-dire 100% de 10)."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692
+#, fuzzy
+msgid ""
+"Flower then asks the selected 10 clients to train the model. Each of the "
+"10 ``ClientApp`` instances receives a message, which causes it to call "
+"``client_fn`` to create an instance of ``FlowerClient``. It then calls "
+"``.fit()`` on each the ``FlowerClient`` instances and returns the "
+"resulting model parameter updates to the ``ServerApp``. When the "
+"``ServerApp`` receives the model parameter updates from the clients, it "
+"hands those updates over to the strategy (*FedAvg*) for aggregation. The "
+"strategy aggregates those updates and returns the new global model, which"
+" then gets used in the next round of federated learning."
+msgstr ""
+"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle."
+" Chacune des 10 instances de ``ClientApp`` reçoit un message, ce qui "
+"l'amène à appeler ``client_fn`` pour créer une instance de "
+"``FlowerClient``. Elle appelle ensuite ``.fit()`` sur chacune des "
+"instances de ``FlowerClient`` et renvoie les mises à jour des paramètres "
+"du modèle au ``ServerApp``. Lorsque le ``ServerApp`` reçoit les mises à "
+"jour des paramètres du modèle de la part des clients, il les transmet à "
+"la stratégie (*FedAvg*) pour qu'elle les agrège. La stratégie agrège ces "
+"mises à jour et renvoie le nouveau modèle global, qui est ensuite utilisé"
+" lors du prochain cycle d'apprentissage fédéré."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705
+msgid "Where's the accuracy?"
+msgstr "Où est la précision ?"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707
+msgid ""
+"You may have noticed that all metrics except for ``losses_distributed`` "
+"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?"
+msgstr ""
+"Tu as peut-être remarqué que toutes les mesures, à l'exception de "
+"``losses_distributed``, sont vides. Où est passé le ``{\"accuracy\": "
+"float(accuracy)}`` ?"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709
+msgid ""
+"Flower can automatically aggregate losses returned by individual clients,"
+" but it cannot do the same for metrics in the generic metrics dictionary "
+"(the one with the ``accuracy`` key). Metrics dictionaries can contain "
+"very different kinds of metrics and even key/value pairs that are not "
+"metrics at all, so the framework does not (and can not) know how to "
+"handle these automatically."
+msgstr ""
+"Flower peut automatiquement agréger les pertes renvoyées par les clients "
+"individuels, mais il ne peut pas faire la même chose pour les mesures "
+"dans le dictionnaire de mesures générique (celui avec la clé "
+"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de"
+" mesures très différents et même des paires clé/valeur qui ne sont pas "
+"des mesures du tout ; le framework ne sait donc pas (et ne peut pas "
+"savoir) comment les gérer automatiquement."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711
+msgid ""
+"As users, we need to tell the framework how to handle/aggregate these "
+"custom metrics, and we do so by passing metric aggregation functions to "
+"the strategy. The strategy will then call these functions whenever it "
+"receives fit or evaluate metrics from clients. The two possible functions"
+" are ``fit_metrics_aggregation_fn`` and "
+"``evaluate_metrics_aggregation_fn``."
+msgstr "" +"En tant qu'utilisateurs, nous devons indiquer au framework comment " +"gérer/agréger ces métriques personnalisées, et nous le faisons en passant" +" des fonctions d'agrégation de métriques à la stratégie. La stratégie " +"appellera alors ces fonctions chaque fois qu'elle recevra des métriques " +"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions " +"possibles sont ``fit_metrics_aggregation_fn`` et " +"``evaluate_metrics_aggregation_fn``." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" +"Créons une simple fonction de calcul de la moyenne pondérée pour agréger " +"la mesure de \"précision\" que nous renvoie ``evaluate`` :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" +"Nous avons maintenant un système complet qui effectue la formation " +"fédérée et l'évaluation fédérée. Il utilise la fonction ``moyenne " +"pondérée`` pour agréger les mesures d'évaluation personnalisées et " +"calcule une seule mesure de ``précision`` pour tous les clients du côté " +"du serveur." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." +msgstr "" +"Les deux autres catégories de mesures (``pertes_centralisées`` et " +"``métriques_centralisées``) sont toujours vides car elles ne s'appliquent" +" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du" +" tutoriel sur les fleurs couvrira l'évaluation centralisée." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "Remarques finales" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" +"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré" +" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage " +"fédéré avec Flower. La même approche que tu as vue peut être utilisée " +"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) " +"et d'autres tâches (pas seulement la classification des images CIFAR-10)," +" par exemple le NLP avec Hugging Face Transformers ou la parole avec " +"SpeechBrain." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? 
Initialize parameters on the server "
+"side? Or evaluate the aggregated model on the server side? We'll cover "
+"all this and more in the next tutorial."
+msgstr ""
+"Dans le prochain cahier, nous allons aborder des concepts plus avancés. "
+"Tu veux personnaliser ta stratégie ? Initialiser des paramètres côté "
+"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout"
+" cela et bien plus encore dans le prochain tutoriel."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817
+#, fuzzy
+msgid ""
+"The `Flower Federated Learning Tutorial - Part 2 "
+"`__ goes into more depth about strategies and all "
+"the advanced things you can build with them."
+msgstr ""
+"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 "
+"`__ va plus en profondeur sur les stratégies et toutes les "
+"choses avancées que tu peux construire avec elles."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9
+#, fuzzy
+msgid "Use a federated learning strategy"
+msgstr "Utiliser une stratégie d'apprentissage fédéré"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11
+#, fuzzy
+msgid ""
+"Welcome to the next part of the federated learning tutorial. In previous "
+"parts of this tutorial, we introduced federated learning with PyTorch and"
+" Flower (`part 1 `__)."
+msgstr ""
+"Bienvenue dans la prochaine partie du tutoriel sur l'apprentissage "
+"fédéré. Dans les parties précédentes de ce tutoriel, nous avons présenté "
+"l'apprentissage fédéré avec PyTorch et Flower (`partie 1 "
+"`__)."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13
+#, fuzzy
+msgid ""
+"In this notebook, we'll begin to customize the federated learning system "
+"we built in the introductory notebook again, using the Flower framework, "
+"Flower Datasets, and PyTorch."
+msgstr ""
+"Dans ce carnet, nous allons commencer à personnaliser à nouveau le "
+"système d'apprentissage fédéré que nous avons construit dans le carnet "
+"d'introduction, en utilisant le framework Flower, Flower Datasets et "
+"PyTorch."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18
+#, fuzzy
+msgid "Let's move beyond FedAvg with Flower strategies! 🌼"
+msgstr "Dépassons FedAvg avec les stratégies Flower ! 🌼"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121
+#, fuzzy
+msgid ""
+"Let's now load the CIFAR-10 training and test set, partition them into "
+"ten smaller datasets (each split into training and validation set), and "
+"wrap everything in their own ``DataLoader``. We introduce a new parameter"
+" ``num_partitions`` which allows us to call ``load_datasets`` with "
+"different numbers of partitions."
+msgstr ""
+"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, "
+"divisons-les en dix ensembles de données plus petits (chacun divisé en "
+"ensemble d'entraînement et de validation), et enveloppons le tout dans "
+"leur propre ``DataLoader``. Nous introduisons un nouveau paramètre "
+"``num_partitions`` qui nous permet d'appeler ``load_datasets`` avec "
+"différents nombres de partitions."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321
+msgid "Strategy customization"
+msgstr "Personnalisation de la stratégie"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323
+msgid ""
+"So far, everything should look familiar if you've worked through the "
+"introductory notebook. 
With that, we're ready to introduce a number of " +"new features." +msgstr "" +"Jusqu'à présent, tout devrait te sembler familier si tu as travaillé sur " +"le cahier d'introduction. Avec cela, nous sommes prêts à présenter un " +"certain nombre de nouvelles fonctionnalités." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "Paramètres côté serveur **initialisation**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#, fuzzy +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" +"Flower, par défaut, initialise le modèle global en demandant à un client " +"aléatoire les paramètres initiaux. Dans de nombreux cas, nous voulons " +"cependant avoir plus de contrôle sur l'initialisation des paramètres. " +"Flower te permet donc de passer directement les paramètres initiaux à la " +"Stratégie :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#, fuzzy +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" +"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " +"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" +" nous regardons de près, nous pouvons voir que les journaux ne montrent " +"aucun appel à la méthode ``FlowerClient.get_parameters``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#, fuzzy +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" +"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " +"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" +" nous regardons de près, nous pouvons voir que les journaux ne montrent " +"aucun appel à la méthode ``FlowerClient.get_parameters``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "Commencer par une stratégie personnalisée" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#, fuzzy +msgid "" +"We've seen the function ``run_simulation`` before. 
It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" +"Elle accepte un certain nombre d'arguments, parmi lesquels le " +"``client_fn`` utilisé pour créer les instances de ``FlowerClient``, le " +"nombre de clients à simuler ``num_clients``, le nombre de rounds " +"``num_rounds``, et la stratégie." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" +"La stratégie englobe l'approche/l'algorithme d'apprentissage fédéré, par " +"exemple, ``FedAvg`` ou ``FedAdagrad``. Essayons d'utiliser une stratégie " +"différente cette fois-ci :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "Paramètre côté serveur **évaluation**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" +"Flower peut évaluer le modèle agrégé côté serveur ou côté client. Les " +"évaluations côté client et côté serveur sont similaires à certains " +"égards, mais différentes à d'autres." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "" +"**L'évaluation centralisée** (ou *évaluation côté serveur*) est " +"conceptuellement simple : elle fonctionne de la même manière que " +"l'évaluation dans l'apprentissage automatique centralisé. S'il existe un " +"ensemble de données côté serveur qui peut être utilisé à des fins " +"d'évaluation, alors c'est parfait. Nous pouvons évaluer le modèle " +"nouvellement agrégé après chaque cycle de formation sans avoir à envoyer " +"le modèle aux clients. Nous avons également la chance que l'ensemble de " +"notre ensemble de données d'évaluation soit disponible à tout moment." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. 
But this power comes at a cost: once we start "
+"to evaluate on the client side, we should be aware that our evaluation "
+"dataset can change over consecutive rounds of learning if those clients "
+"are not always available. Moreover, the dataset held by each client can "
+"also change over consecutive rounds. This can lead to evaluation results "
+"that are not stable, so even if we would not change the model, we'd see "
+"our evaluation results fluctuate over consecutive rounds."
+msgstr ""
+"**L'évaluation fédérée** (ou *évaluation côté client*) est plus complexe,"
+" mais aussi plus puissante : elle ne nécessite pas d'ensemble de données "
+"centralisé et nous permet d'évaluer les modèles sur un plus grand "
+"ensemble de données, ce qui donne souvent des résultats d'évaluation plus"
+" réalistes. En fait, de nombreux scénarios exigent que nous utilisions "
+"l'**évaluation fédérée** si nous voulons obtenir des résultats "
+"d'évaluation représentatifs. Mais cette puissance a un coût : une fois "
+"que nous commençons à évaluer côté client, nous devons savoir que notre "
+"ensemble de données d'évaluation peut changer au cours des cycles "
+"d'apprentissage consécutifs si ces clients ne sont pas toujours "
+"disponibles. De plus, l'ensemble de données détenu par chaque client peut"
+" également changer au cours des cycles consécutifs. Cela peut conduire à "
+"des résultats d'évaluation qui ne sont pas stables, donc même si nous ne "
+"changions pas le modèle, nous verrions nos résultats d'évaluation "
+"fluctuer au cours des cycles consécutifs."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518
+msgid ""
+"We've seen how federated evaluation works on the client side (i.e., by "
+"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see "
+"how we can evaluate aggregated model parameters on the server-side:"
+msgstr ""
+"Nous avons vu comment l'évaluation fédérée fonctionne du côté client "
+"(c'est-à-dire en implémentant la méthode ``evaluate`` dans "
+"``FlowerClient``). Voyons maintenant comment nous pouvons évaluer les "
+"paramètres du modèle agrégé du côté serveur :"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549
+msgid ""
+"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we "
+"create a ``ServerApp`` that uses this strategy."
+msgstr ""
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586
+#, fuzzy
+msgid "Finally, we run the simulation."
+msgstr "Enfin, nous exécutons la simulation."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613
+msgid "Sending/receiving arbitrary values to/from clients"
+msgstr "Envoi/réception de valeurs arbitraires vers/depuis les clients"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615
+#, fuzzy
+msgid ""
+"In some situations, we want to configure client-side execution (training,"
+" evaluation) from the server-side. One example for that is the server "
+"asking the clients to train for a certain number of local epochs. Flower "
+"provides a way to send configuration values from the server to the "
+"clients using a dictionary. Let's look at an example where the clients "
+"receive values from the server through the ``config`` parameter in "
+"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` "
+"method receives the configuration dictionary through the ``config`` "
+"parameter and can then read values from this dictionary. 
In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" +"Comment pouvons-nous donc envoyer ce dictionnaire de configuration du " +"serveur aux clients ? Les stratégies de Flower intégrées fournissent un " +"moyen de le faire, et cela fonctionne de la même façon que l'évaluation " +"côté serveur. Nous fournissons une fonction à la stratégie, et la " +"stratégie appelle cette fonction pour chaque cycle d'apprentissage fédéré" +" :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#, fuzzy +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "" +"Ensuite, nous allons simplement passer cette fonction à la stratégie " +"FedAvg avant de commencer la simulation :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" +"Comme nous pouvons le voir, les journaux des clients incluent maintenant " +"le cycle actuel d'apprentissage fédéré (qu'ils lisent depuis le " +"dictionnaire ``config``). Nous pouvons également configurer " +"l'apprentissage local pour qu'il s'exécute pendant une époque au cours du" +" premier et du deuxième cycle d'apprentissage fédéré, puis pendant deux " +"époques au cours du troisième cycle." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" +"Les clients peuvent également renvoyer des valeurs arbitraires au " +"serveur. 
Pour ce faire, ils renvoient un dictionnaire depuis ``fit`` "
+"et/ou ``evaluate``. Nous avons vu et utilisé ce concept tout au long de "
+"ce carnet sans le mentionner explicitement : notre ``FlowerClient`` "
+"renvoie un dictionnaire contenant une paire clé/valeur personnalisée en "
+"tant que troisième valeur de retour dans ``evaluate``."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763
+msgid "Scaling federated learning"
+msgstr "Mise à l'échelle de l'apprentissage fédéré"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765
+msgid ""
+"As a last step in this notebook, let's see how we can use Flower to "
+"experiment with a large number of clients."
+msgstr ""
+"Comme dernière étape de ce carnet, voyons comment nous pouvons utiliser "
+"Flower pour expérimenter avec un grand nombre de clients."
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785
+msgid ""
+"Note that we can reuse the ``ClientApp`` for different ``num-partitions``"
+" since the Context is defined by the ``num_supernodes`` argument in "
+"``run_simulation()``."
+msgstr ""
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787
+#, fuzzy, python-format
+msgid ""
+"We now have 1000 partitions, each holding 45 training and 5 validation "
+"examples. Given that the number of training examples on each client is "
+"quite small, we should probably train the model a bit longer, so we "
+"configure the clients to perform 3 local training epochs. We should also "
+"adjust the fraction of clients selected for training during each round "
+"(we don't want all 1000 clients participating in every round), so we "
+"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of "
+"available clients (so 25 clients) will be selected for training each "
+"round:"
+msgstr ""
+"Nous avons maintenant 1000 partitions, chacune contenant 45 exemples "
+"d'entraînement et 5 exemples de validation. Étant donné que le nombre "
+"d'exemples d'entraînement sur chaque client est assez faible, nous "
+"devrions probablement entraîner le modèle un peu plus longtemps, nous "
+"configurons donc les clients pour qu'ils effectuent 3 époques "
+"d'entraînement local. Nous devrions également ajuster la fraction de "
+"clients sélectionnés pour l'entraînement à chaque tour (nous ne voulons "
+"pas que les 1000 clients participent à chaque tour), nous ajustons donc "
+"``fraction_fit`` à ``0.025``, ce qui signifie que seulement 2,5% des "
+"clients disponibles (donc 25 clients) seront sélectionnés pour "
+"l'entraînement à chaque tour :"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843
+msgid ""
+"In this notebook, we've seen how we can gradually enhance our system by "
+"customizing the strategy, initializing parameters on the server side, "
+"choosing a different strategy, and evaluating models on the server-side. "
+"That's quite a bit of flexibility with so little code, right?"
+msgstr ""
+"Dans ce carnet, nous avons vu comment nous pouvons progressivement "
+"améliorer notre système en personnalisant la stratégie, en initialisant "
+"les paramètres côté serveur, en choisissant une stratégie différente et "
+"en évaluant les modèles côté serveur. C'est une sacrée flexibilité avec "
+"si peu de code, n'est-ce pas ?"
+ +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" +"Dans les sections ultérieures, nous avons vu comment nous pouvons " +"communiquer des valeurs arbitraires entre le serveur et les clients pour " +"personnaliser entièrement l'exécution côté client. Grâce à cette " +"capacité, nous avons construit une simulation d'apprentissage fédéré à " +"grande échelle en utilisant le moteur de client virtuel Flower et nous " +"avons mené une expérience impliquant 1000 clients dans la même charge de " +"travail - le tout dans un carnet Jupyter !" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " +"`__ montre comment construire une ``Stratégie`` entièrement " +"personnalisée à partir de zéro." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#, fuzzy +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" +"Dans ce tutoriel, tu apprendras ce qu'est l'apprentissage fédéré, tu " +"construiras ton premier système dans Flower, et tu l'étendras " +"progressivement. Si tu travailles sur toutes les parties du tutoriel, tu " +"seras capable de construire des systèmes d'apprentissage fédéré avancés " +"qui se rapprochent de l'état actuel de l'art dans le domaine." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" +"🧑‍🏫 Ce tutoriel part de zéro et n'attend aucune familiarité avec " +"l'apprentissage fédéré. Seule une compréhension de base de la science des" +" données et de la programmation Python est supposée." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +#, fuzzy +msgid "Let's get started!" +msgstr "Allons-y, déclarons-le !" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "Apprentissage automatique classique" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" +"Avant de commencer à discuter de l'apprentissage fédéré, récapitulons " +"rapidement la façon dont la plupart des apprentissages automatiques " +"fonctionnent aujourd'hui." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "" +"Dans l'apprentissage automatique, nous avons un modèle et des données. Le" +" modèle peut être un réseau neuronal (comme illustré ici), ou quelque " +"chose d'autre, comme la régression linéaire classique." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "Modèle et données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" +"Nous entraînons le modèle en utilisant les données pour effectuer une " +"tâche utile. Une tâche peut consister à détecter des objets dans des " +"images, à transcrire un enregistrement audio ou à jouer à un jeu comme le" +" Go." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|33cacb7d985c4906b348515c1a5cd993|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "Entraîne le modèle à l'aide des données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#, fuzzy +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" +"Dans la pratique, les données d'entraînement avec lesquelles nous " +"travaillons ne proviennent pas de la machine sur laquelle nous entraînons" +" le modèle. Elles sont créées ailleurs." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#, fuzzy +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." +msgstr "" +"Elle prend naissance sur un smartphone par l'interaction de l'utilisateur" +" avec une application, une voiture qui collecte des données de capteurs, " +"un ordinateur portable qui reçoit des entrées via le clavier, ou un haut-" +"parleur intelligent qui écoute quelqu'un qui essaie de chanter une " +"chanson." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|cc080a555947492fa66131dc3a967603|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "Données sur un téléphone" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" +"Il est également important de mentionner que cet \"ailleurs\" n'est " +"généralement pas un seul endroit, mais plusieurs. Il peut s'agir de " +"plusieurs appareils fonctionnant tous avec la même application. Mais il " +"peut également s'agir de plusieurs organisations, qui génèrent toutes des" +" données pour la même tâche." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|085c3e0fb8664c6aa06246636524b20b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "Les données se trouvent sur de nombreux appareils" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" +"Ainsi, pour utiliser l'apprentissage automatique, ou tout autre type " +"d'analyse de données, l'approche utilisée par le passé consistait à " +"collecter toutes les données sur un serveur central. Ce serveur peut se " +"trouver quelque part dans un centre de données, ou quelque part dans le " +"cloud." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|bfe69c74e48c45d49b50251c38c2a019|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "Collecte centralisée des données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#, fuzzy +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "" +"Une fois que toutes les données sont rassemblées en un seul endroit, nous" +" pouvons enfin utiliser des algorithmes d'apprentissage automatique pour " +"entraîner notre modèle sur les données. C'est l'approche d'apprentissage " +"automatique sur laquelle nous nous sommes fondamentalement toujours " +"appuyés." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "Formation au modèle central" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "Les défis de l'apprentissage automatique classique" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." 
+msgstr "" +"L'approche classique de l'apprentissage automatique que nous venons de " +"voir peut être utilisée dans certains cas. Parmi les grands exemples, on " +"peut citer la catégorisation des photos de vacances, ou l'analyse du " +"trafic web. Des cas, où toutes les données sont naturellement disponibles" +" sur un serveur centralisé." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|163117eb654a4273babba413cf8065f5|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "Possibilité de centralisation" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" +"Mais cette approche ne peut pas être utilisée dans de nombreux autres cas" +" : lorsque les données ne sont pas disponibles sur un serveur centralisé," +" ou lorsque les données disponibles sur un serveur ne sont pas " +"suffisantes pour former un bon modèle." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "Impossible de centraliser" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#, fuzzy +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" +msgstr "" +"Il existe de nombreuses raisons pour lesquelles l'approche classique " +"centralisée de l'apprentissage automatique ne fonctionne pas pour un " +"grand nombre de cas d'utilisation très importants dans le monde réel, " +"notamment :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#, fuzzy +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" +"**Réglementations** : GDPR (Europe), CCPA (Californie), PIPEDA (Canada), " +"LGPD (Brésil), PDPL (Argentine), KVKK (Turquie), POPI (Afrique du Sud), " +"FSS (Russie), CDPR (Chine), PDPB (Inde), PIPA (Corée), APPI (Japon), PDP " +"(Indonésie), PDPA (Singapour), APP (Australie), et d'autres " +"réglementations protègent les données sensibles contre le déplacement. En" +" fait, ces réglementations empêchent même parfois des organisations " +"individuelles de combiner les données de leurs propres utilisateurs pour " +"la formation à l'intelligence artificielle parce que ces utilisateurs " +"vivent dans différentes parties du monde, et que leurs données sont " +"régies par des réglementations différentes en matière de protection des " +"données." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" +"**Préférence de l'utilisateur** : En plus de la réglementation, il existe" +" des cas d'utilisation où les utilisateurs s'attendent tout simplement à " +"ce qu'aucune donnée ne quitte leur appareil, jamais. Si tu tapes tes mots" +" de passe et tes informations de carte de crédit sur le clavier numérique" +" de ton téléphone, tu ne t'attends pas à ce que ces mots de passe " +"finissent sur le serveur de l'entreprise qui a développé ce clavier, n" +"'est-ce pas ? En fait, ce cas d'utilisation est la raison pour laquelle " +"l'apprentissage fédéré a été inventé en premier lieu." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#, fuzzy +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." +msgstr "" +"**volume de données** : certains capteurs, comme les caméras, produisent " +"un volume de données si important qu'il n'est ni possible ni économique " +"de collecter toutes les données (en raison, par exemple, de la bande " +"passante ou de l'efficacité des communications). Pensez à un service " +"ferroviaire national comptant des centaines de gares à travers le pays. " +"Si chacune de ces gares est équipée d'un certain nombre de caméras de " +"sécurité, le volume de données brutes sur les appareils qu'elles " +"produisent nécessite une infrastructure incroyablement puissante et " +"excessivement coûteuse pour les traiter et les stocker. Et la plupart de " +"ces données ne sont même pas utiles." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" +"Voici quelques exemples où l'apprentissage automatique centralisé ne " +"fonctionne pas :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#, fuzzy +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" +"Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " +"former des modèles de détection du cancer" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" +"Informations financières provenant de différentes organisations pour " +"détecter les fraudes financières" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" +"Les données de localisation de ta voiture électrique pour mieux prédire " +"l'autonomie" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" +"Messages cryptés de bout en bout pour former de meilleurs modèles " +"d'autocomplétion" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#, fuzzy +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" +"La popularité des systèmes améliorant la confidentialité comme le " +"navigateur `Brave `__ ou le messager `Signal " +"`__ montre que les utilisateurs se soucient de la " +"confidentialité. En fait, ils choisissent la version améliorant la " +"confidentialité plutôt que d'autres alternatives, si une telle " +"alternative existe. Mais que pouvons-nous faire pour appliquer " +"l'apprentissage automatique et la science des données à ces cas afin " +"d'utiliser les données privées ? Après tout, ce sont tous des domaines " +"qui bénéficieraient de manière significative des récentes avancées en " +"matière d'IA." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "Apprentissage fédéré" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "" +"L'apprentissage fédéré inverse simplement cette approche. Il permet " +"l'apprentissage automatique sur des données distribuées en déplaçant la " +"formation vers les données, au lieu de déplacer les données vers la " +"formation. 
Voici l'explication en une seule phrase :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "Apprentissage automatique central : déplace les données vers le calcul" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" +"Ce faisant, il nous permet d'utiliser l'apprentissage automatique (et " +"d'autres approches de science des données) dans des domaines où cela " +"n'était pas possible auparavant. Nous pouvons désormais former " +"d'excellents modèles d'IA médicale en permettant à différents hôpitaux de" +" travailler ensemble. Nous pouvons résoudre les fraudes financières en " +"formant des modèles d'IA sur les données de différentes institutions " +"financières. Nous pouvons créer de nouvelles applications d'amélioration " +"de la confidentialité (telles que la messagerie sécurisée) qui ont une " +"meilleure IA intégrée que leurs alternatives d'amélioration de la " +"confidentialité. Et ce ne sont là que quelques exemples qui me viennent à" +" l'esprit. Au fur et à mesure que nous déployons l'apprentissage fédéré, " +"nous découvrons de plus en plus de domaines qui peuvent soudainement être" +" réinventés parce qu'ils ont maintenant accès à de vastes quantités de " +"données auparavant inaccessibles." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" +"Comment fonctionne l'apprentissage fédéré ? Commençons par une " +"explication intuitive." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "L'apprentissage fédéré en cinq étapes" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "Étape 0 : Initialisation du modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." +msgstr "" +"Nous commençons par initialiser le modèle sur le serveur. C'est " +"exactement la même chose dans l'apprentissage centralisé classique : nous" +" initialisons les paramètres du modèle, soit de façon aléatoire, soit à " +"partir d'un point de contrôle précédemment sauvegardé." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|f403fcd69e4e44409627e748b404c086|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "Initialise le modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" +"Étape 1 : envoyer le modèle à un certain nombre d'organisations/appareils" +" connectés (nœuds clients)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#, fuzzy +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" +"Ensuite, nous envoyons les paramètres du modèle global aux nœuds clients " +"connectés (par exemple, les appareils périphériques comme les smartphones" +" ou les serveurs appartenant à des organisations). Cela permet de " +"s'assurer que chaque nœud participant commence sa formation locale en " +"utilisant les mêmes paramètres de modèle. Nous n'utilisons souvent que " +"quelques-uns des nœuds connectés au lieu de tous les nœuds. La raison en " +"est que la sélection d'un nombre croissant de nœuds clients a des " +"rendements décroissants." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|4b00fe63870145968f8443619a792a42|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "Envoyer le modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" +"Étape 2 : Entraîne le modèle localement sur les données de chaque " +"organisation/appareil (nœud client)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" +"Maintenant que tous les nœuds clients (sélectionnés) disposent de la " +"dernière version des paramètres du modèle global, ils commencent " +"l'entraînement local. Ils utilisent leur propre ensemble de données " +"locales pour entraîner leur propre modèle local. Ils n'entraînent pas le " +"modèle jusqu'à la convergence totale, mais ils ne s'entraînent que " +"pendant un petit moment. Il peut s'agir d'une seule époque sur les " +"données locales, ou même de quelques étapes (mini-batchs)." 
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240
+msgid "|368378731066486fa4397e89bc6b870c|"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311
+msgid "Train on local data"
+msgstr "Entraînement sur les données locales"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247
+msgid "Step 3: Return model updates back to the server"
+msgstr "Étape 3 : Renvoyer les mises à jour du modèle au serveur"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249
+msgid ""
+"After local training, each client node has a slightly different version "
+"of the model parameters they originally received. The parameters are all "
+"different because each client node has different examples in its local "
+"dataset. The client nodes then send those model updates back to the "
+"server. The model updates they send can either be the full model "
+"parameters or just the gradients that were accumulated during local "
+"training."
+msgstr ""
+"Après l'entraînement local, chaque nœud client possède une version "
+"légèrement différente des paramètres du modèle qu'il a reçus à l'origine."
+" Les paramètres sont tous différents parce que chaque nœud client a des "
+"exemples différents dans son ensemble de données local. Les nœuds clients"
+" renvoient ensuite ces mises à jour du modèle au serveur. Les mises à "
+"jour du modèle qu'ils envoient peuvent être soit les paramètres complets "
+"du modèle, soit seulement les gradients qui ont été accumulés au cours de"
+" l'entraînement local."
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255
+msgid "|a66aa83d85bf4ffba7ed660b718066da|"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313
+msgid "Send model updates"
+msgstr "Envoyer les mises à jour du modèle"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262
+msgid "Step 4: Aggregate model updates into a new global model"
+msgstr ""
+"Étape 4 : Agréger les mises à jour des modèles en un nouveau modèle "
+"global"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264
+msgid ""
+"The server receives model updates from the selected client nodes. If it "
+"selected 100 client nodes, it now has 100 slightly different versions of "
+"the original global model, each trained on the local data of one client. "
+"But didn't we want to have one model that contains the learnings from the"
+" data of all 100 client nodes?"
+msgstr ""
+"Le serveur reçoit les mises à jour du modèle des nœuds clients "
+"sélectionnés. S'il a sélectionné 100 nœuds clients, il dispose maintenant"
+" de 100 versions légèrement différentes du modèle global original, "
+"chacune ayant été formée sur les données locales d'un client. Mais ne "
+"voulions-nous pas avoir un seul modèle qui contienne les apprentissages "
+"des données de l'ensemble des 100 nœuds clients ?"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266
+msgid ""
+"In order to get one single model, we have to combine all the model "
+"updates we received from the client nodes. This process is called "
+"*aggregation*, and there are many different ways to do it. The most basic"
+" way to do it is called *Federated Averaging* (`McMahan et al., 2016 "
+"`__), often abbreviated as *FedAvg*. "
+"*FedAvg* takes the 100 model updates and, as the name suggests, averages "
+"them. To be more precise, it takes the *weighted average* of the model "
+"updates, weighted by the number of examples each client used for "
+"training. The weighting is important to make sure that each data example "
+"has the same \"influence\" on the resulting global model. If one client "
+"has 10 examples, and another client has 100 examples, then - without "
+"weighting - each of the 10 examples would influence the global model ten "
+"times as much as each of the 100 examples."
+msgstr ""
+"Pour obtenir un seul et unique modèle, nous devons combiner toutes les "
+"mises à jour du modèle que nous avons reçues des nœuds clients. Ce "
+"processus est appelé *agrégation*, et il existe de nombreuses façons de "
+"procéder. La méthode la plus élémentaire s'appelle *Federated Averaging* "
+"(`McMahan et al., 2016 `__), souvent abrégée en *FedAvg*. "
+"*FedAvg* prend les 100 mises à jour du modèle et, comme son nom "
+"l'indique, en fait la moyenne. Plus précisément, il calcule la *moyenne "
+"pondérée* des mises à jour du modèle, pondérée par le nombre d'exemples "
+"que chaque client a utilisés pour l'entraînement. Cette pondération est "
+"importante pour garantir que chaque exemple de données ait la même "
+"\"influence\" sur le modèle global résultant. Si un client possède 10 "
+"exemples et qu'un autre en possède 100, alors - sans pondération - "
+"chacun des 10 exemples influencerait le modèle global dix fois plus que "
+"chacun des 100 exemples."
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273
+msgid "|82324b9af72a4582a81839d55caab767|"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315
+msgid "Aggregate model updates"
+msgstr "Agréger les mises à jour du modèle"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280
+msgid "Step 5: Repeat steps 1 to 4 until the model converges"
+msgstr "Étape 5 : répète les étapes 1 à 4 jusqu'à ce que le modèle converge"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282
+msgid ""
+"Steps 1 to 4 are what we call a single round of federated learning. The "
+"global model parameters get sent to the participating client nodes (step "
+"1), the client nodes train on their local data (step 2), they send their "
+"updated models to the server (step 3), and the server then aggregates the"
+" model updates to get a new version of the global model (step 4)."
+msgstr ""
+"Les étapes 1 à 4 constituent ce que nous appelons un cycle unique "
+"d'apprentissage fédéré. Les paramètres du modèle global sont envoyés aux "
+"nœuds clients participants (étape 1), les nœuds clients s'entraînent sur "
+"leurs données locales (étape 2), ils envoient leurs modèles mis à jour au"
+" serveur (étape 3), et le serveur agrège ensuite les mises à jour du "
+"modèle pour obtenir une nouvelle version du modèle global (étape 4)."
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284
+#, fuzzy
+msgid ""
+"During a single round, each client node that participates in that "
+"iteration only trains for a little while. This means that after the "
+"aggregation step (step 4), we have a model that has been trained on all "
+"the data of all participating client nodes, but only for a little while. "
+"We then have to repeat this training process over and over again to "
+"eventually arrive at a fully trained model that performs well across the "
+"data of all client nodes."
+msgstr ""
+"Au cours d'un seul tour, chaque nœud client qui participe à cette "
+"itération ne s'entraîne que pendant un petit moment. 
Cela signifie " +"qu'après l'étape d'agrégation (étape 4), nous avons un modèle qui a été " +"entraîné sur toutes les données de tous les nœuds clients participants, " +"mais seulement pendant un petit moment. Nous devons ensuite répéter ce " +"processus d'entraînement encore et encore pour finalement arriver à un " +"modèle entièrement entraîné qui fonctionne bien sur l'ensemble des " +"données de tous les nœuds clients." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"Félicitations, tu comprends maintenant les bases de l'apprentissage " +"fédéré. Il y a bien sûr beaucoup plus à discuter, mais c'était " +"l'apprentissage fédéré en quelques mots. Dans les parties suivantes de ce" +" tutoriel, nous irons plus en détail. Les questions intéressantes " +"comprennent : comment pouvons-nous sélectionner les meilleurs nœuds " +"clients qui devraient participer au prochain tour ? Quelle est la " +"meilleure façon d'agréger les mises à jour du modèle ? Comment pouvons-" +"nous gérer les nœuds clients qui échouent (stragglers) ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +#, fuzzy +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" +"Tout comme nous pouvons former un modèle sur les données décentralisées " +"de différents nœuds clients, nous pouvons également évaluer le modèle sur" +" ces données pour recevoir des mesures précieuses. C'est ce qu'on appelle" +" l'évaluation fédérée, parfois abrégée en FE. En fait, l'évaluation " +"fédérée fait partie intégrante de la plupart des systèmes d'apprentissage" +" fédéré." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "Analyses fédérées" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" +"Dans de nombreux cas, l'apprentissage automatique n'est pas nécessaire " +"pour tirer de la valeur des données. L'analyse des données peut donner " +"des indications précieuses, mais là encore, il n'y a souvent pas assez de" +" données pour obtenir une réponse claire. Quel est l'âge moyen auquel les" +" gens développent un certain type de problème de santé ? 
L'analyse "
+"fédérée permet de telles requêtes sur plusieurs nœuds clients. Elle est "
+"généralement utilisée en conjonction avec d'autres technologies de "
+"renforcement de la confidentialité, comme l'agrégation sécurisée, pour "
+"empêcher le serveur de voir les résultats soumis par les nœuds clients "
+"individuels."
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305
+msgid ""
+"Differential privacy (DP) is often mentioned in the context of Federated "
+"Learning. It is a privacy-preserving method used when analyzing and "
+"sharing statistical data, ensuring the privacy of individual "
+"participants. DP achieves this by adding statistical noise to the model "
+"updates, ensuring any individual participants’ information cannot be "
+"distinguished or re-identified. This technique can be considered an "
+"optimization that provides a quantifiable privacy protection measure."
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326
+msgid "Flower"
+msgstr "Flower"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328
+msgid ""
+"Federated learning, federated evaluation, and federated analytics require"
+" infrastructure to move machine learning models back and forth, train and"
+" evaluate them on local data, and then aggregate the updated models. "
+"Flower provides the infrastructure to do exactly that in an easy, "
+"scalable, and secure way. In short, Flower presents a unified approach to"
+" federated learning, analytics, and evaluation. It allows the user to "
+"federate any workload, any ML framework, and any programming language."
+msgstr ""
+"L'apprentissage fédéré, l'évaluation fédérée et l'analyse fédérée "
+"nécessitent une infrastructure pour déplacer les modèles d'apprentissage "
+"automatique dans les deux sens, les entraîner et les évaluer sur des "
+"données locales, puis agréger les modèles mis à jour. Flower fournit "
+"l'infrastructure pour faire exactement cela de manière simple, évolutive "
+"et sécurisée. En bref, Flower présente une approche unifiée de "
+"l'apprentissage, de l'analyse et de l'évaluation fédérés. Il permet à "
+"l'utilisateur de fédérer n'importe quelle charge de travail, n'importe "
+"quel cadre de ML et n'importe quel langage de programmation."
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334
+msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|"
+msgstr ""
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340
+msgid ""
+"Flower federated learning server and client nodes (car, scooter, personal"
+" computer, roomba, and phone)"
+msgstr ""
+"Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, "
+"scooter, ordinateur personnel, roomba et téléphone)"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353
+msgid ""
+"Congratulations, you just learned the basics of federated learning and "
+"how it relates to the classic (centralized) machine learning!"
+msgstr ""
+"Félicitations, tu viens d'apprendre les bases de l'apprentissage fédéré "
+"et son rapport avec l'apprentissage automatique classique (centralisé) !"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355
+msgid ""
+"In the next part of this tutorial, we are going to build a first "
+"federated learning system with Flower."
+msgstr ""
+"Dans la prochaine partie de ce tutoriel, nous allons construire un "
+"premier système d'apprentissage fédéré avec Flower."
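
To make the weighted averaging from step 4 above concrete, here is a minimal NumPy sketch of the idea (an illustration only, not Flower's built-in FedAvg strategy). It assumes each client reports its updated parameters as a list of arrays together with the number of local training examples it used.

```python
import numpy as np

def fedavg(results):
    """Aggregate (parameters, num_examples) pairs into new global parameters."""
    total_examples = sum(num_examples for _, num_examples in results)
    num_layers = len(results[0][0])
    return [
        sum(params[layer] * num_examples for params, num_examples in results)
        / total_examples
        for layer in range(num_layers)
    ]

# Two clients: one trained on 10 examples, the other on 100.
# Without weighting, each of the 10 examples would count ten times as much
# as each of the 100; weighting by example count restores equal influence.
client_a = ([np.full((2, 2), 1.0), np.full(2, 1.0)], 10)
client_b = ([np.full((2, 2), 0.0), np.full(2, 0.0)], 100)
new_global = fedavg([client_a, client_b])  # each entry = 10 / 110 ≈ 0.09
```
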
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" +"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " +"Slack : `Join Slack `__" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 1 " +"`__ " +"montre comment construire un système d'apprentissage fédéré simple avec " +"PyTorch et Flower." + +#~ msgid "Flower CLI commands" +#~ msgstr "Commandes CLI Flower" #~ msgid "Contributor guide" #~ msgstr "Guide pour les contributeurs" -#~ msgid "API Reference - Flower CLI commands" -#~ msgstr "Référence API - Commandes CLI pour Flower" +#~ msgid "API Reference - Flower CLI commands" +#~ msgstr "Référence API - Commandes CLI pour Flower" + +#~ msgid "API Reference - flwr (Python package)" +#~ msgstr "Référence API - flwr (paquetage Python)" + +#~ msgid "Flower client." +#~ msgstr "Client de Flower" + +#~ msgid "Abstract base class for Flower clients." +#~ msgstr "" + +#~ msgid "Evaluate the provided parameters using the locally held dataset." +#~ msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" + +#~ msgid "Parameters" +#~ msgstr "Paramètres du modèle." + +#~ msgid "" +#~ "The evaluation instructions containing " +#~ "(global) model parameters received from " +#~ "the server and a dictionary of " +#~ "configuration values used to customize " +#~ "the local evaluation process." +#~ msgstr "" + +#~ msgid "Returns" +#~ msgstr "Ressources" + +#~ msgid "" +#~ "The evaluation result containing the " +#~ "loss on the local dataset and " +#~ "other details such as the number " +#~ "of local data examples used for " +#~ "evaluation." +#~ msgstr "" + +#~ msgid "Return type" +#~ msgstr "" + +#~ msgid "Refine the provided parameters using the locally held dataset." +#~ msgstr "" + +#~ msgid "" +#~ "The training instructions containing (global)" +#~ " model parameters received from the " +#~ "server and a dictionary of configuration" +#~ " values used to customize the local" +#~ " training process." +#~ msgstr "" + +#~ msgid "" +#~ "The training result containing updated " +#~ "parameters and other details such as " +#~ "the number of local training examples" +#~ " used for training." +#~ msgstr "" + +#~ msgid "Return the current local model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "The get parameters instructions received " +#~ "from the server containing a dictionary" +#~ " of configuration values." +#~ msgstr "" + +#~ msgid "The current local model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "Return set of client's properties." +#~ msgstr "" + +#~ msgid "" +#~ "The get properties instructions received " +#~ "from the server containing a dictionary" +#~ " of configuration values." +#~ msgstr "" + +#~ msgid "The current client properties." +#~ msgstr "" + +#~ msgid "Start a Flower client node which connects to a Flower server." +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " server. If the Flower server runs" +#~ " on the same machine on port " +#~ "8080, then `server_address` would be " +#~ "`\"[::]:8080\"`." 
+#~ msgstr "" + +#~ msgid "An implementation of the abstract base class `flwr.client.Client`." +#~ msgstr "" + +#~ msgid "" +#~ "The maximum length of gRPC messages " +#~ "that can be exchanged with the " +#~ "Flower server. The default should be " +#~ "sufficient for most models. Users who" +#~ " train very large models might need" +#~ " to increase this value. Note that" +#~ " the Flower server needs to be " +#~ "started with the same value (see " +#~ "`flwr.server.start_server`), otherwise it will " +#~ "not know about the increased limit " +#~ "and block larger messages." +#~ msgstr "" + +#~ msgid "" +#~ "The PEM-encoded root certificates as " +#~ "a byte string or a path string." +#~ " If provided, a secure connection " +#~ "using the certificates will be " +#~ "established to an SSL-enabled Flower " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "DEPRECATED - USE 'transport' INSTEAD. " +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. This feature" +#~ " is experimental, it might change " +#~ "considerably in future versions of " +#~ "Flower." +#~ msgstr "" +#~ "DÉPRÉCIÉ - UTILISER 'transport' À LA " +#~ "PLACE Définit si le client interagit " +#~ "ou non avec le serveur à l'aide" +#~ " de l'API REST expérimentale. Cette " +#~ "fonctionnalité est expérimentale, elle " +#~ "pourrait changer considérablement dans les " +#~ "futures versions de Flower." + +#~ msgid "" +#~ "Configure the transport layer. Allowed " +#~ "values: - 'grpc-bidi': gRPC, " +#~ "bidirectional streaming - 'grpc-rere': " +#~ "gRPC, request-response (experimental) - " +#~ "'rest': HTTP (experimental)" +#~ msgstr "" +#~ "Valeurs autorisées : - 'grpc-bidi' " +#~ ": gRPC, flux bidirectionnel - 'grpc-" +#~ "rere' : gRPC, requête-réponse " +#~ "(expérimental) - 'rest' : HTTP " +#~ "(expérimental)" + +#~ msgid "Starting a gRPC client with an insecure server connection:" +#~ msgstr "" + +#~ msgid "Starting an SSL-enabled gRPC client:" +#~ msgstr "" + +#~ msgid "Abstract base class for Flower clients using NumPy." +#~ msgstr "" + +#~ msgid "The current (global) model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "Configuration parameters which allow the " +#~ "server to influence evaluation on the" +#~ " client. It can be used to " +#~ "communicate arbitrary values from the " +#~ "server to the client, for example, " +#~ "to influence the number of examples " +#~ "used for evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "* **loss** (*float*) -- The evaluation" +#~ " loss of the model on the local" +#~ " dataset. * **num_examples** (*int*) -- " +#~ "The number of examples used for " +#~ "evaluation. * **metrics** (*Dict[str, " +#~ "Scalar]*) -- A dictionary mapping " +#~ "arbitrary string keys to values of " +#~ "type bool, bytes, float, int, or " +#~ "str. It can be used to " +#~ "communicate arbitrary values back to the" +#~ " server." +#~ msgstr "" + +#~ msgid "" +#~ "**loss** (*float*) -- The evaluation " +#~ "loss of the model on the local " +#~ "dataset." +#~ msgstr "" + +#~ msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "**metrics** (*Dict[str, Scalar]*) -- A " +#~ "dictionary mapping arbitrary string keys " +#~ "to values of type bool, bytes, " +#~ "float, int, or str. It can be " +#~ "used to communicate arbitrary values " +#~ "back to the server." 
+#~ msgstr "" + +#~ msgid "" +#~ "The previous return type format (int," +#~ " float, float) and the extended " +#~ "format (int, float, float, Dict[str, " +#~ "Scalar]) have been deprecated and " +#~ "removed since Flower 0.19." +#~ msgstr "" + +#~ msgid "Train the provided parameters using the locally held dataset." +#~ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#~ msgid "" +#~ "Configuration parameters which allow the " +#~ "server to influence training on the " +#~ "client. It can be used to " +#~ "communicate arbitrary values from the " +#~ "server to the client, for example, " +#~ "to set the number of (local) " +#~ "training epochs." +#~ msgstr "" + +#~ msgid "" +#~ "* **parameters** (*NDArrays*) -- The " +#~ "locally updated model parameters. * " +#~ "**num_examples** (*int*) -- The number " +#~ "of examples used for training. * " +#~ "**metrics** (*Dict[str, Scalar]*) -- A " +#~ "dictionary mapping arbitrary string keys " +#~ "to values of type bool, bytes, " +#~ "float, int, or str. It can be " +#~ "used to communicate arbitrary values " +#~ "back to the server." +#~ msgstr "" + +#~ msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "**num_examples** (*int*) -- The number of examples used for training." +#~ msgstr "" + +#~ msgid "" +#~ "Configuration parameters requested by the " +#~ "server. This can be used to tell" +#~ " the client which parameters are " +#~ "needed along with some Scalar " +#~ "attributes." +#~ msgstr "" + +#~ msgid "" +#~ "**parameters** -- The local model " +#~ "parameters as a list of NumPy " +#~ "ndarrays." +#~ msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#~ msgid "Return a client's set of properties." +#~ msgstr "Renvoie l'ensemble des propriétés d'un client." + +#~ msgid "" +#~ "Configuration parameters requested by the " +#~ "server. This can be used to tell" +#~ " the client which properties are " +#~ "needed along with some Scalar " +#~ "attributes." +#~ msgstr "" + +#~ msgid "" +#~ "**properties** -- A dictionary mapping " +#~ "arbitrary string keys to values of " +#~ "type bool, bytes, float, int, or " +#~ "str. It can be used to communicate" +#~ " arbitrary property values back to " +#~ "the server." +#~ msgstr "" + +#~ msgid "Start a Flower NumPyClient which connects to a gRPC server." +#~ msgstr "" + +#~ msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#~ msgstr "" + +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "" + +#~ msgid "Starting a SSL-enabled client:" +#~ msgstr "" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type ClientLike. Note that the " +#~ "created client instances are ephemeral " +#~ "and will often be destroyed after " +#~ "a single method invocation. Since client" +#~ " instances are not long-lived, they" +#~ " should not attempt to carry state" +#~ " over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) 
should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" +#~ "Une fonction créant des instances de " +#~ "client. La fonction doit prendre un " +#~ "seul argument `str` appelé `cid`. Elle" +#~ " doit retourner une seule instance de" +#~ " client de type ClientLike. Notez que" +#~ " les instances de client créées sont" +#~ " éphémères et seront souvent détruites " +#~ "après une seule invocation de méthode." +#~ " Puisque les instances de client ne" +#~ " sont pas de longue durée, elles " +#~ "ne doivent pas essayer de transporter" +#~ " l'état sur les invocations de " +#~ "méthode. Tout état requis par l'instance" +#~ " (modèle, jeu de données, hyperparamètres," +#~ " ...) doit être (re)créé dans l'appel" +#~ " à `client_fn` ou dans l'appel à " +#~ "n'importe quelle méthode de client (par" +#~ " exemple, charger les données d'évaluation" +#~ " dans la méthode `evaluate` elle-" +#~ "même)." + +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "" + +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." +#~ msgstr "" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. Example: `{\"num_cpus\": 4," +#~ " \"num_gpus\": 1}`. To understand the " +#~ "GPU utilization caused by `num_gpus`, " +#~ "consult the Ray documentation on GPU " +#~ "support." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" + +#~ msgid "" +#~ "Currently supported values are `num_rounds`" +#~ " (int, default: 1) and `round_timeout` " +#~ "in seconds (float, default: None)." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." 
+#~ msgstr "" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" + +#~ msgid "Flower server." +#~ msgstr "Serveur de Flower" + +#~ msgid "Start a Flower server using the gRPC transport layer." +#~ msgstr "" + +#~ msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_server` will create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.strategy.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_server` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "The maximum length of gRPC messages " +#~ "that can be exchanged with the " +#~ "Flower clients. The default should be" +#~ " sufficient for most models. Users " +#~ "who train very large models might " +#~ "need to increase this value. Note " +#~ "that the Flower clients need to be" +#~ " started with the same value (see " +#~ "`flwr.client.start_client`), otherwise clients will" +#~ " not know about the increased limit" +#~ " and block larger messages." +#~ msgstr "" + +#~ msgid "" +#~ "Tuple containing root certificate, server " +#~ "certificate, and private key to start" +#~ " a secure SSL-enabled server. The " +#~ "tuple is expected to have three " +#~ "bytes elements in the following order:" +#~ " * CA certificate. * server " +#~ "certificate. * server private key." +#~ msgstr "" + +#~ msgid "" +#~ "Tuple containing root certificate, server " +#~ "certificate, and private key to start" +#~ " a secure SSL-enabled server. The " +#~ "tuple is expected to have three " +#~ "bytes elements in the following order:" +#~ msgstr "" + +#~ msgid "CA certificate." +#~ msgstr "Certificats" + +#~ msgid "server certificate." +#~ msgstr "Certificats" + +#~ msgid "server private key." +#~ msgstr "stratégie.du.serveur" + +#~ msgid "**hist** -- Object containing training and evaluation metrics." +#~ msgstr "" + +#~ msgid "Starting an insecure server:" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Starting an SSL-enabled server:" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Contains the strategy abstraction and different implementations." +#~ msgstr "" + +#~ msgid "Abstract base class for server strategy implementations." +#~ msgstr "" + +#~ msgid "The current round of federated learning." +#~ msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#~ msgid "" +#~ "Successful updates from the previously " +#~ "selected and configured clients. Each " +#~ "pair of `(ClientProxy, FitRes` constitutes " +#~ "a successful update from one of " +#~ "the previously selected clients. Not " +#~ "that not all previously selected clients" +#~ " are necessarily included in this " +#~ "list: a client might drop out and" +#~ " not submit a result. For each " +#~ "client that did not submit an " +#~ "update, there should be an `Exception`" +#~ " in `failures`." +#~ msgstr "" + +#~ msgid "" +#~ "Exceptions that occurred while the " +#~ "server was waiting for client updates." 
+#~ msgstr "" + +#~ msgid "" +#~ "**aggregation_result** -- The aggregated " +#~ "evaluation result. Aggregation typically uses" +#~ " some variant of a weighted average." +#~ msgstr "" + +#~ msgid "Aggregate training results." +#~ msgstr "Résultats globaux de l'évaluation." + +#~ msgid "" +#~ "Successful updates from the previously " +#~ "selected and configured clients. Each " +#~ "pair of `(ClientProxy, FitRes)` constitutes" +#~ " a successful update from one of " +#~ "the previously selected clients. Not " +#~ "that not all previously selected clients" +#~ " are necessarily included in this " +#~ "list: a client might drop out and" +#~ " not submit a result. For each " +#~ "client that did not submit an " +#~ "update, there should be an `Exception`" +#~ " in `failures`." +#~ msgstr "" + +#~ msgid "" +#~ "**parameters** -- If parameters are " +#~ "returned, then the server will treat " +#~ "these as the new global model " +#~ "parameters (i.e., it will replace the" +#~ " previous parameters with the ones " +#~ "returned from this method). If `None`" +#~ " is returned (e.g., because there " +#~ "were only failures and no viable " +#~ "results) then the server will no " +#~ "update the previous model parameters, " +#~ "the updates received in this round " +#~ "are discarded, and the global model " +#~ "parameters remain the same." +#~ msgstr "" + +#~ msgid "Configure the next round of evaluation." +#~ msgstr "Configuration de l'évaluation côté serveur" + +#~ msgid "The client manager which holds all currently connected clients." +#~ msgstr "" + +#~ msgid "" +#~ "**evaluate_configuration** -- A list of " +#~ "tuples. Each tuple in the list " +#~ "identifies a `ClientProxy` and the " +#~ "`EvaluateIns` for this particular " +#~ "`ClientProxy`. If a particular `ClientProxy`" +#~ " is not included in this list, " +#~ "it means that this `ClientProxy` will" +#~ " not participate in the next round" +#~ " of federated evaluation." +#~ msgstr "" + +#~ msgid "Configure the next round of training." +#~ msgstr "" + +#~ msgid "" +#~ "**fit_configuration** -- A list of " +#~ "tuples. Each tuple in the list " +#~ "identifies a `ClientProxy` and the " +#~ "`FitIns` for this particular `ClientProxy`." +#~ " If a particular `ClientProxy` is not" +#~ " included in this list, it means " +#~ "that this `ClientProxy` will not " +#~ "participate in the next round of " +#~ "federated learning." +#~ msgstr "" + +#~ msgid "Evaluate the current model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "This function can be used to " +#~ "perform centralized (i.e., server-side) " +#~ "evaluation of model parameters." +#~ msgstr "" + +#~ msgid "" +#~ "**evaluation_result** -- The evaluation " +#~ "result, usually a Tuple containing loss" +#~ " and a dictionary containing task-" +#~ "specific metrics (e.g., accuracy)." +#~ msgstr "" + +#~ msgid "Initialize the (global) model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "" +#~ "**parameters** -- If parameters are " +#~ "returned, then the server will treat " +#~ "these as the initial global model " +#~ "parameters." +#~ msgstr "" + +#~ msgid "Configurable FedAvg strategy implementation." +#~ msgstr "Configuration de l'évaluation fédérée" + +#~ msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#~ msgstr "" + +#~ msgid "" +#~ "Fraction of clients used during " +#~ "training. 
In case `min_fit_clients` is " +#~ "larger than `fraction_fit * " +#~ "available_clients`, `min_fit_clients` will still " +#~ "be sampled. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "" +#~ "Fraction of clients used during " +#~ "validation. In case `min_evaluate_clients` is" +#~ " larger than `fraction_evaluate * " +#~ "available_clients`, `min_evaluate_clients` will " +#~ "still be sampled. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Minimum number of clients used during training. Defaults to 2." +#~ msgstr "" + +#~ msgid "Minimum number of clients used during validation. Defaults to 2." +#~ msgstr "" + +#~ msgid "Minimum number of total clients in the system. Defaults to 2." +#~ msgstr "" + +#~ msgid "Optional function used for validation. Defaults to None." +#~ msgstr "" + +#~ msgid "Function used to configure training. Defaults to None." +#~ msgstr "" + +#~ msgid "Function used to configure validation. Defaults to None." +#~ msgstr "" + +#~ msgid "Whether or not accept rounds containing failures. Defaults to True." +#~ msgstr "" + +#~ msgid "Initial global model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "Metrics aggregation function, optional." +#~ msgstr "" + +#~ msgid "Aggregate evaluation losses using weighted average." +#~ msgstr "Résultats globaux de l'évaluation." + +#~ msgid "Aggregate fit results using weighted average." +#~ msgstr "" + +#~ msgid "Evaluate model parameters using an evaluation function." +#~ msgstr "" + +#~ msgid "Initialize global model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "Use a fraction of available clients for evaluation." +#~ msgstr "" + +#~ msgid "Return the sample size and the required number of available clients." +#~ msgstr "" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Averaging with Momentum strategy." +#~ msgstr "Stratégie de moyenne fédérée." + +#~ msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" +#~ msgstr "" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "" + +#~ msgid "" +#~ "Server-side learning rate used in " +#~ "server-side optimization. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Configurable QFedAvg strategy implementation." +#~ msgstr "" + +#~ msgid "Configurable fault-tolerant FedAvg strategy implementation." +#~ msgstr "" + +#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "" + +#~ msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#~ msgstr "" +#~ "FedYogi - Stratégie d'apprentissage fédéré " +#~ "utilisant Yogi côté serveur. Mise en " +#~ "oeuvre basée sur https://arxiv.org/abs/2003.00295" + +#~ msgid "Fraction of clients used during training. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Fraction of clients used during validation. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Server-side learning rate. Defaults to 1e-1." +#~ msgstr "" + +#~ msgid "Client-side learning rate. Defaults to 1e-1." +#~ msgstr "" + +#~ msgid "Momentum parameter. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Second moment parameter. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+#~ msgstr "" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Optimization strategy." +#~ msgstr "Stratégie de moyenne fédérée." + +#~ msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#~ msgstr "" + +#~ msgid "" +#~ "The strategy in itself will not be" +#~ " different than FedAvg, the client " +#~ "needs to be adjusted. A proximal " +#~ "term needs to be added to the " +#~ "loss function during the training:" +#~ msgstr "" + +#~ msgid "" +#~ "\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +#~ "\n" +#~ msgstr "\\\\frac{\\Nmu}{2} || w - w^t ||^2" + +#~ msgid "" +#~ "Where $w^t$ are the global parameters" +#~ " and $w$ are the local weights " +#~ "the function will be optimized with." +#~ msgstr "" + +#~ msgid "In PyTorch, for example, the loss would go from:" +#~ msgstr "" + +#~ msgid "To:" +#~ msgstr "" + +#~ msgid "" +#~ "With `global_params` being a copy of " +#~ "the parameters before the training takes" +#~ " place." +#~ msgstr "" + +#~ msgid "" +#~ "The weight of the proximal term " +#~ "used in the optimization. 0.0 makes " +#~ "this strategy equivalent to FedAvg, and" +#~ " the higher the coefficient, the more" +#~ " regularization will be used (that " +#~ "is, the client parameters will need " +#~ "to be closer to the server " +#~ "parameters during training)." +#~ msgstr "" + +#~ msgid "Sends the proximal factor mu to the clients" +#~ msgstr "" -#~ msgid "API Reference - flwr (Python package)" -#~ msgstr "Référence API - flwr (paquetage Python)" +#~ msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#~ msgstr "" +#~ "Stratégie FedAdagrad - Optimisation fédérée" +#~ " adaptative à l'aide d'Adagrad." -#~ msgid "Flower client." -#~ msgstr "Client de Flower" +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" + +#~ msgid "FedAdam - Adaptive Federated Optimization using Adam." +#~ msgstr "FedAdam - Optimisation fédérée adaptative utilisant Adam." + +#~ msgid "Momentum parameter. Defaults to 0.9." +#~ msgstr "" + +#~ msgid "Second moment parameter. Defaults to 0.99." +#~ msgstr "" + +#~ msgid "FedYogi [Reddi et al., 2020] strategy." +#~ msgstr "Stratégie FedYogi [Reddi et al., 2020]." + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "Optimisation fédérée adaptative à l'aide de Yogi." + +#~ msgid "Federated learning strategy using Yogi on server-side." 
+#~ msgstr "L'apprentissage fédéré en cinq étapes" + +#~ msgid "Differential Privacy Wrappers in Flower" +#~ msgstr "Les enveloppes différentielles de confidentialité dans les fleurs" + +#~ msgid "Evaluation" +#~ msgstr "Solution" + +#~ msgid "Code examples" +#~ msgstr "Exemple de code complet" + +#~ msgid "" +#~ "Flower Quickstart (PyTorch): coming soon " +#~ "(the TensorFlow/Keras example can easily " +#~ "be changed to make it work with" +#~ " PyTorch)" +#~ msgstr "" + +#~ msgid "First time contributors" +#~ msgstr "Bonnes premières contributions" + +#~ msgid "First MXNet 1.6 example (MNIST)" +#~ msgstr "" + +#~ msgid "ImageNet (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "LSTM (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "Transformer (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "BERT (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "Logging" +#~ msgstr "Enregistrement" + +#~ msgid "|cce04c6f539b421a91f5dba40287193f|" +#~ msgstr "|cce04c6f539b421a91f5dba40287193f|" + +#~ msgid "|e392aef42ba248e19e35446f95a6d1ca|" +#~ msgstr "|e392aef42ba248e19e35446f95a6d1ca|" + +#~ msgid "|7e028f44defe4f31a02debc729f2010d|" +#~ msgstr "|7e028f44defe4f31a02debc729f2010d|" + +#~ msgid "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" +#~ msgstr "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" + +#~ msgid "|9c0445ce962744e1a1c0a4abc697a334|" +#~ msgstr "|9c0445ce962744e1a1c0a4abc697a334|" + +#~ msgid "|a3246766a6db412888131b3bcdad0971|" +#~ msgstr "|a3246766a6db412888131b3bcdad0971|" + +#~ msgid "|db6f2bee32f143b8a5085b6a8ce1acd1|" +#~ msgstr "|db6f2bee32f143b8a5085b6a8ce1acd1|" + +#~ msgid "|405653bc8f874e9595fd59cc82b3d48c|" +#~ msgstr "|405653bc8f874e9595fd59cc82b3d48c|" + +#~ msgid "|073a728154ed406e8fe54e1d9f18dcb9|" +#~ msgstr "|073a728154ed406e8fe54e1d9f18dcb9|" + +#~ msgid "|50e80ea4f22945848b65ed7eed35e0e1|" +#~ msgstr "|50e80ea4f22945848b65ed7eed35e0e1|" + +#~ msgid "|f3cf9148d85e4b68b66b6c255b25e327|" +#~ msgstr "|f3cf9148d85e4b68b66b6c255b25e327|" + +#~ msgid "|1fedb4f8714947e1b13f03696180c741|" +#~ msgstr "|1fedb4f8714947e1b13f03696180c741|" + +#~ msgid "|a32d4ad1ccb34461942d75c7b2b51d65|" +#~ msgstr "|a32d4ad1ccb34461942d75c7b2b51d65|" + +#~ msgid "|3531696c52904cd3b9944034ab959d48|" +#~ msgstr "|3531696c52904cd3b9944034ab959d48|" + +#~ msgid "An Introduction to Federated Learning" +#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#~ msgid "Strategies in Federated Learning" +#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#~ msgid "Building a Strategy" +#~ msgstr "Stratégies intégrées" + +#~ msgid "Client and NumPyClient" +#~ msgstr "NumPyClient" + +#~ msgid "Strategies" +#~ msgstr "Stratégies personnalisées" + +#~ msgid "SSL-enabled Server and Client" +#~ msgstr "" + +#~ msgid "About these documents" +#~ msgstr "À propos de ces documents" + +#~ msgid "Index" +#~ msgstr "Index" + +#~ msgid "Search" +#~ msgstr "Recherche" + +#~ msgid "Copyright" +#~ msgstr "Droits d'auteur" + +#~ msgid "Save Progress" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server does not prescribe " +#~ "a way to persist model updates or" +#~ " evaluation results. Flower does not " +#~ "(yet) automatically save model updates " +#~ "on the server-side. It's on the" +#~ " roadmap to provide a built-in " +#~ "way of doing this." 
+#~ msgstr "" + +#~ msgid "Release Process" +#~ msgstr "Publier Flower" + +#~ msgid "Virtual Env Installation" +#~ msgstr "Virtualenv avec Anaconda" + +#~ msgid "Install development versions" +#~ msgstr "Installer les versions de développement de Flower" + +#~ msgid "Set up a virtual env" +#~ msgstr "Mettre en place un environment virtuel" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" +#~ "Notez que, pour construire la " +#~ "documentation localement (avec ``poetry run" +#~ " make html``, comme décrit ci-" +#~ "dessous), ``Pandoc _`" +#~ " doit être installé sur le système." + +#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" +#~ msgstr "Un fine-tuning de LLaMA 2 avec Hugging Face et PyTorch" + +#~ msgid "XGBoost" +#~ msgstr "XGBoost" + +#~ msgid "Android ONNX on-device training" +#~ msgstr "" +#~ "Utiliser Android ONNX pour faire du " +#~ "training directement sur le téléphone" + +#~ msgid "Contribute on GitHub" +#~ msgstr "Contribuer sur GitHub" + +#~ msgid "How to write a good PR title" +#~ msgstr "Comment écrire un bon titre de PR" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "" +#~ "Un titre de PR bien choisi permet" +#~ " aux autres développeurs de rapidement " +#~ "comprendre l'intérêt et le scope des " +#~ "changements proposés. Voici un guide " +#~ "pour vous aider à écrire des bons" +#~ " titres de PR :" + +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." +#~ msgstr "" +#~ "1. Soyez clair et concis : Donnez" +#~ " un résumé clair des changements de" +#~ " manière concise. 1. Utilisez des " +#~ "verbes actionnables : Commencez par des" +#~ " verbes comme \"Add\", \"Update\", ou " +#~ "\"Fix\" pour indiquer le but. 1. " +#~ "Inclure des renseignements pertinents : " +#~ "Mentionner la caractéristique ou le " +#~ "module concerné pour le contexte. 1. " +#~ "Gardez le court : Évitez les longs" +#~ " titres pour une lisibilité facile. " +#~ "1. Utiliser une bonne capitalisation et" +#~ " une ponctuation : Suivre les règles" +#~ " de grammaire pour la clarté." + +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "" +#~ "Commençons par quelques exemples de " +#~ "titres qui devraient être évités parce" +#~ " qu'ils ne fournissent pas d'information" +#~ " significative :" -#~ msgid "Abstract base class for Flower clients." -#~ msgstr "" +#~ msgid "Implement Algorithm" +#~ msgstr "Implement Algorithm" -#~ msgid "Evaluate the provided parameters using the locally held dataset." 
-#~ msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#~ msgid "Database" +#~ msgstr "Base de données" -#~ msgid "Parameters" -#~ msgstr "Paramètres du modèle." +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "Add my_new_file.py to codebase" -#~ msgid "" -#~ "The evaluation instructions containing " -#~ "(global) model parameters received from " -#~ "the server and a dictionary of " -#~ "configuration values used to customize " -#~ "the local evaluation process." -#~ msgstr "" +#~ msgid "Improve code in module" +#~ msgstr "Improve code in module" -#~ msgid "Returns" -#~ msgstr "Ressources" +#~ msgid "Change SomeModule" +#~ msgstr "Change SomeModule" #~ msgid "" -#~ "The evaluation result containing the " -#~ "loss on the local dataset and " -#~ "other details such as the number " -#~ "of local data examples used for " -#~ "evaluation." +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" #~ msgstr "" +#~ "Voici quelques bons exemples qui " +#~ "fournissent de l'information utile sans " +#~ "répéter comment ils le font, comme " +#~ "cela est déjà visible dans la " +#~ "section \"Files changed\" de la PR " +#~ ":" -#~ msgid "Return type" -#~ msgstr "" +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "Update docs banner to mention Flower Summit 2023" -#~ msgid "Refine the provided parameters using the locally held dataset." -#~ msgstr "" +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "Remove unnecessary XGBoost dependency" -#~ msgid "" -#~ "The training instructions containing (global)" -#~ " model parameters received from the " -#~ "server and a dictionary of configuration" -#~ " values used to customize the local" -#~ " training process." +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "Remove redundant attributes in strategies subclassing FedAvg" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" #~ msgstr "" +#~ "Ajoute une tâche CI pour déployer " +#~ "le système de mise en scène " +#~ "lorsque la branche `main` change" #~ msgid "" -#~ "The training result containing updated " -#~ "parameters and other details such as " -#~ "the number of local training examples" -#~ " used for training." +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" #~ msgstr "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" -#~ msgid "Return the current local model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "Differential privacy" +#~ msgstr "Confidentialité différentielle" #~ msgid "" -#~ "The get parameters instructions received " -#~ "from the server containing a dictionary" -#~ " of configuration values." +#~ "The Flower server does not prescribe " +#~ "a way to aggregate evaluation results," +#~ " but it enables the user to " +#~ "fully customize result aggregation." #~ msgstr "" -#~ msgid "The current local model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "Configure logging" +#~ msgstr "Configurer les clients" -#~ msgid "Return set of client's properties." +#~ msgid "" +#~ "The Flower logger keeps track of " +#~ "all core events that take place in" +#~ " federated learning workloads. 
It presents" +#~ " information by default following a " +#~ "standard message format:" #~ msgstr "" +#~ "L'enregistreur de Flower garde la trace" +#~ " de tous les événements principaux " +#~ "qui ont lieu dans les charges de" +#~ " travail de l'apprentissage fédéré. Il " +#~ "présente les informations par défaut en" +#~ " suivant un format de message " +#~ "standard :" #~ msgid "" -#~ "The get properties instructions received " -#~ "from the server containing a dictionary" -#~ " of configuration values." +#~ "containing relevant information including: log" +#~ " message level (e.g. :code:`INFO`, " +#~ ":code:`DEBUG`), a timestamp, the line " +#~ "where the logging took place from, " +#~ "as well as the log message itself." +#~ " In this way, the logger would " +#~ "typically display information on your " +#~ "terminal as follows:" #~ msgstr "" +#~ "contenant des informations pertinentes, " +#~ "notamment : le niveau du message " +#~ "de journal (par exemple :code:`INFO`, " +#~ ":code:`DEBUG`), un horodatage, la ligne " +#~ "à partir de laquelle l'enregistrement a" +#~ " eu lieu, ainsi que le message " +#~ "de journal lui-même. De cette " +#~ "façon, le logger afficherait typiquement " +#~ "des informations sur ton terminal comme" +#~ " suit :" -#~ msgid "The current client properties." -#~ msgstr "" +#~ msgid "Saving log to file" +#~ msgstr "Enregistrement du journal dans un fichier" -#~ msgid "Start a Flower client node which connects to a Flower server." +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ ":code:`fl.server.start_server`) and when using " +#~ "the :code:`VirtualClientEngine` (i.e. when you" +#~ " do :code:`fl.simulation.start_simulation`). In " +#~ "some situations you might want to " +#~ "save this log to disk. You can " +#~ "do so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" #~ msgstr "" +#~ "Par défaut, le journal de Flower " +#~ "est affiché dans le terminal à " +#~ "partir duquel tu as lancé ta " +#~ "charge de travail d'apprentissage fédéré. " +#~ "Cela s'applique à la fois à la " +#~ "fédération basée sur gRPC (c'est-à-dire " +#~ "lorsque tu fais :code:`fl.server.start_server`) " +#~ "et à l'utilisation du " +#~ ":code:`VirtualClientEngine` (c'est-à-dire lorsque tu" +#~ " fais :code:`fl.simulation.start_simulation`). Dans " +#~ "certaines situations, tu peux vouloir " +#~ "sauvegarder ce journal sur le disque." +#~ " Tu peux le faire en appelant " +#~ "la fonction `fl.common.logger.configure() " +#~ "`_." +#~ " Par exemple :" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " server. If the Flower server runs" -#~ " on the same machine on port " -#~ "8080, then `server_address` would be " -#~ "`\"[::]:8080\"`." +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to :code:`log.txt`. This file will " +#~ "be created in the same directory " +#~ "as were you are running the code" +#~ " from. If we inspect we see the" +#~ " log above is also recorded but " +#~ "prefixing with :code:`identifier` each line:" #~ msgstr "" +#~ "Avec ce qui précède, Flower enregistrera" +#~ " le journal que tu vois sur ton" +#~ " terminal dans :code:`log.txt`. Ce fichier" +#~ " sera créé dans le même répertoire" +#~ " que celui à partir duquel tu " +#~ "exécutes le code. 
Si nous inspectons," +#~ " nous voyons que le journal ci-" +#~ "dessus est également enregistré, mais en" +#~ " préfixant chaque ligne avec " +#~ ":code:`identifier` :" -#~ msgid "An implementation of the abstract base class `flwr.client.Client`." -#~ msgstr "" +#~ msgid "Log your own messages" +#~ msgstr "Enregistrer tes propres messages" #~ msgid "" -#~ "The maximum length of gRPC messages " -#~ "that can be exchanged with the " -#~ "Flower server. The default should be " -#~ "sufficient for most models. Users who" -#~ " train very large models might need" -#~ " to increase this value. Note that" -#~ " the Flower server needs to be " -#~ "started with the same value (see " -#~ "`flwr.server.start_server`), otherwise it will " -#~ "not know about the increased limit " -#~ "and block larger messages." +#~ "You might expand the information shown" +#~ " by default with the Flower logger" +#~ " by adding more messages relevant to" +#~ " your application. You can achieve " +#~ "this easily as follows." #~ msgstr "" +#~ "Tu peux élargir les informations " +#~ "affichées par défaut avec le logger " +#~ "Flower en ajoutant d'autres messages " +#~ "pertinents pour ton application. Tu peux" +#~ " y parvenir facilement en procédant " +#~ "comme suit." #~ msgid "" -#~ "The PEM-encoded root certificates as " -#~ "a byte string or a path string." -#~ " If provided, a secure connection " -#~ "using the certificates will be " -#~ "established to an SSL-enabled Flower " -#~ "server." +#~ "In this way your logger will show," +#~ " in addition to the default messages," +#~ " the ones introduced by the clients" +#~ " as specified above." #~ msgstr "" +#~ "De cette façon, ton logger affichera," +#~ " en plus des messages par défaut, " +#~ "ceux introduits par les clients comme" +#~ " spécifié ci-dessus." -#~ msgid "" -#~ "DEPRECATED - USE 'transport' INSTEAD. " -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. This feature" -#~ " is experimental, it might change " -#~ "considerably in future versions of " -#~ "Flower." -#~ msgstr "" -#~ "DÉPRÉCIÉ - UTILISER 'transport' À LA " -#~ "PLACE Définit si le client interagit " -#~ "ou non avec le serveur à l'aide" -#~ " de l'API REST expérimentale. Cette " -#~ "fonctionnalité est expérimentale, elle " -#~ "pourrait changer considérablement dans les " -#~ "futures versions de Flower." +#~ msgid "Log to a remote service" +#~ msgstr "Se connecter à un service distant" #~ msgid "" -#~ "Configure the transport layer. Allowed " -#~ "values: - 'grpc-bidi': gRPC, " -#~ "bidirectional streaming - 'grpc-rere': " -#~ "gRPC, request-response (experimental) - " -#~ "'rest': HTTP (experimental)" +#~ "The :code:`fl.common.logger.configure` function, " +#~ "also allows specifying a host to " +#~ "which logs can be pushed (via " +#~ ":code:`POST`) through a native Python " +#~ ":code:`logging.handler.HTTPHandler`. This is a " +#~ "particularly useful feature in " +#~ ":code:`gRPC`-based Federated Learning workloads " +#~ "where otherwise gathering logs from all" +#~ " entities (i.e. the server and the" +#~ " clients) might be cumbersome. Note " +#~ "that in Flower simulation, the server" +#~ " automatically displays all logs. You " +#~ "can still specify a :code:`HTTPHandler` " +#~ "should you wish to backup or " +#~ "analyze the logs somewhere else." 
#~ msgstr "" -#~ "Valeurs autorisées : - 'grpc-bidi' " -#~ ": gRPC, flux bidirectionnel - 'grpc-" -#~ "rere' : gRPC, requête-réponse " -#~ "(expérimental) - 'rest' : HTTP " -#~ "(expérimental)" +#~ "La fonction :code:`fl.common.logger.configure` " +#~ "permet également de spécifier un hôte" +#~ " vers lequel les journaux peuvent " +#~ "être envoyés (via :code:`POST`) par " +#~ "l'intermédiaire d'un :code:`logging.handler.HTTPHandler`" +#~ " natif de Python. Il s'agit d'une " +#~ "fonction particulièrement utile dans les " +#~ "charges de travail d'apprentissage fédéré " +#~ "basées sur :code:`gRPC` où la collecte" +#~ " des journaux de toutes les entités" +#~ " (c'est-à-dire le serveur et les " +#~ "clients) pourrait s'avérer fastidieuse. Notez" +#~ " que dans la simulation Flower, le" +#~ " serveur affiche automatiquement tous les" +#~ " journaux. Vous pouvez toujours spécifier" +#~ " un :code:`HTTPHandler` si vous souhaitez" +#~ " sauvegarder ou analyser les journaux " +#~ "à un autre endroit." -#~ msgid "Starting a gRPC client with an insecure server connection:" -#~ msgstr "" +#~ msgid "Enable SSL connections" +#~ msgstr "Collecte centralisée des données" -#~ msgid "Starting an SSL-enabled gRPC client:" -#~ msgstr "" +#~ msgid "Python version" +#~ msgstr "Version Python" -#~ msgid "Abstract base class for Flower clients using NumPy." +#~ msgid "" +#~ "Flower requires at least `Python 3.7 " +#~ "`_, but `Python 3.8" +#~ " `_ or above is " +#~ "recommended." #~ msgstr "" +#~ "Flower nécessite `Python 3.7 " +#~ "`_ ou plus, nous " +#~ "recommandons `Python 3.8 " +#~ "`_." -#~ msgid "The current (global) model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "Run simulations" +#~ msgstr "Simulation de moniteur" #~ msgid "" -#~ "Configuration parameters which allow the " -#~ "server to influence evaluation on the" -#~ " client. It can be used to " -#~ "communicate arbitrary values from the " -#~ "server to the client, for example, " -#~ "to influence the number of examples " -#~ "used for evaluation." +#~ "Simulating Federated Learning workloads is " +#~ "useful for a multitude of use-" +#~ "cases: you might want to run your" +#~ " workload on a large cohort of " +#~ "clients but without having to source," +#~ " configure and mange a large number" +#~ " of physical devices; you might want" +#~ " to run your FL workloads as " +#~ "fast as possible on the compute " +#~ "systems you have access to without " +#~ "having to go through a complex " +#~ "setup process; you might want to " +#~ "validate your algorithm on different " +#~ "scenarios at varying levels of data " +#~ "and system heterogeneity, client availability," +#~ " privacy budgets, etc. These are " +#~ "among some of the use-cases where" +#~ " simulating FL workloads makes sense. " +#~ "Flower can accommodate these scenarios " +#~ "by means of its `VirtualClientEngine " +#~ "`_ or VCE." #~ msgstr "" #~ msgid "" -#~ "* **loss** (*float*) -- The evaluation" -#~ " loss of the model on the local" -#~ " dataset. * **num_examples** (*int*) -- " -#~ "The number of examples used for " -#~ "evaluation. * **metrics** (*Dict[str, " -#~ "Scalar]*) -- A dictionary mapping " -#~ "arbitrary string keys to values of " -#~ "type bool, bytes, float, int, or " -#~ "str. It can be used to " -#~ "communicate arbitrary values back to the" -#~ " server." +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. 
These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" #~ msgstr "" #~ msgid "" -#~ "**loss** (*float*) -- The evaluation " -#~ "loss of the model on the local " -#~ "dataset." +#~ "resource-aware: this means that each " +#~ "client gets assigned a portion of " +#~ "the compute and memory on your " +#~ "system. You as a user can control" +#~ " this at the beginning of the " +#~ "simulation and allows you to control " +#~ "the degree of parallelism of your " +#~ "Flower FL simulation. The fewer the " +#~ "resources per client, the more clients" +#~ " can run concurrently on the same " +#~ "hardware." #~ msgstr "" -#~ msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +#~ msgid "" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to :code:`VirtualClientEngine`'s " +#~ "internals." #~ msgstr "" #~ msgid "" -#~ "**metrics** (*Dict[str, Scalar]*) -- A " -#~ "dictionary mapping arbitrary string keys " -#~ "to values of type bool, bytes, " -#~ "float, int, or str. It can be " -#~ "used to communicate arbitrary values " -#~ "back to the server." +#~ "ephemeral: this means that a client " +#~ "is only materialized when it is " +#~ "required in the FL process (e.g. " +#~ "to do `fit() `_). The object is" +#~ " destroyed afterwards, releasing the " +#~ "resources it was assigned and allowing" +#~ " in this way other clients to " +#~ "participate." #~ msgstr "" #~ msgid "" -#~ "The previous return type format (int," -#~ " float, float) and the extended " -#~ "format (int, float, float, Dict[str, " -#~ "Scalar]) have been deprecated and " -#~ "removed since Flower 0.19." +#~ "The :code:`VirtualClientEngine` implements `virtual`" +#~ " clients using `Ray `_, " +#~ "an open-source framework for scalable" +#~ " Python workloads. In particular, Flower's" +#~ " :code:`VirtualClientEngine` makes use of " +#~ "`Actors `_ to spawn `virtual` clients" +#~ " and run their workload." #~ msgstr "" -#~ msgid "Train the provided parameters using the locally held dataset." -#~ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" - -#~ msgid "" -#~ "Configuration parameters which allow the " -#~ "server to influence training on the " -#~ "client. It can be used to " -#~ "communicate arbitrary values from the " -#~ "server to the client, for example, " -#~ "to set the number of (local) " -#~ "training epochs." +#~ msgid "Launch your Flower simulation" #~ msgstr "" #~ msgid "" -#~ "* **parameters** (*NDArrays*) -- The " -#~ "locally updated model parameters. * " -#~ "**num_examples** (*int*) -- The number " -#~ "of examples used for training. * " -#~ "**metrics** (*Dict[str, Scalar]*) -- A " -#~ "dictionary mapping arbitrary string keys " -#~ "to values of type bool, bytes, " -#~ "float, int, or str. It can be " -#~ "used to communicate arbitrary values " -#~ "back to the server." +#~ "Running Flower simulations still require " +#~ "you to define your client class, a" +#~ " strategy, and utility functions to " +#~ "download and load (and potentially " +#~ "partition) your dataset. 
With that out" +#~ " of the way, launching your " +#~ "simulation is done with `start_simulation " +#~ "`_ " +#~ "and a minimal example looks as " +#~ "follows:" #~ msgstr "" -#~ msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "VirtualClientEngine resources" +#~ msgstr "Moteur de client virtuel" -#~ msgid "**num_examples** (*int*) -- The number of examples used for training." +#~ msgid "" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the :code:`ray_init_args` input argument" +#~ " to :code:`start_simulation` which the VCE" +#~ " internally passes to Ray's " +#~ ":code:`ray.init` command. For a complete " +#~ "list of settings you can configure " +#~ "check the `ray.init `_ " +#~ "documentation. Do not set " +#~ ":code:`ray_init_args` if you want the " +#~ "VCE to use all your system's CPUs" +#~ " and GPUs." #~ msgstr "" -#~ msgid "" -#~ "Configuration parameters requested by the " -#~ "server. This can be used to tell" -#~ " the client which parameters are " -#~ "needed along with some Scalar " -#~ "attributes." +#~ msgid "Assigning client resources" #~ msgstr "" #~ msgid "" -#~ "**parameters** -- The local model " -#~ "parameters as a list of NumPy " -#~ "ndarrays." -#~ msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#~ msgid "Return a client's set of properties." -#~ msgstr "Renvoie l'ensemble des propriétés d'un client." +#~ "By default the :code:`VirtualClientEngine` " +#~ "assigns a single CPU core (and " +#~ "nothing else) to each virtual client." +#~ " This means that if your system " +#~ "has 10 cores, that many virtual " +#~ "clients can be concurrently running." +#~ msgstr "" #~ msgid "" -#~ "Configuration parameters requested by the " -#~ "server. This can be used to tell" -#~ " the client which properties are " -#~ "needed along with some Scalar " -#~ "attributes." +#~ "More often than not, you would " +#~ "probably like to adjust the resources" +#~ " your clients get assigned based on" +#~ " the complexity (i.e. compute and " +#~ "memory footprint) of your FL workload." +#~ " You can do so when starting " +#~ "your simulation by setting the argument" +#~ " `client_resources` to `start_simulation `_. Two " +#~ "keys are internally used by Ray to" +#~ " schedule and spawn workloads (in our" +#~ " case Flower clients):" #~ msgstr "" -#~ msgid "" -#~ "**properties** -- A dictionary mapping " -#~ "arbitrary string keys to values of " -#~ "type bool, bytes, float, int, or " -#~ "str. It can be used to communicate" -#~ " arbitrary property values back to " -#~ "the server." +#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." #~ msgstr "" -#~ msgid "Start a Flower NumPyClient which connects to a gRPC server." +#~ msgid "" +#~ ":code:`num_gpus` indicates the **ratio** of" +#~ " GPU memory a client gets assigned." #~ msgstr "" -#~ msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." 
+#~ msgid "Let's see a few examples:" #~ msgstr "" -#~ msgid "Starting a client with an insecure server connection:" +#~ msgid "" +#~ "While the :code:`client_resources` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. The " +#~ ":code:`VirtualClientEngine` will schedule 100 " +#~ "jobs to run (each simulating a " +#~ "client sampled by the strategy) and " +#~ "then will execute them in a " +#~ "resource-aware manner in batches of " +#~ "8." #~ msgstr "" -#~ msgid "Starting a SSL-enabled client:" +#~ msgid "" +#~ "To understand all the intricate details" +#~ " on how resources are used to " +#~ "schedule FL clients and how to " +#~ "define custom resources, please take a" +#~ " look at the `Ray documentation " +#~ "`_." #~ msgstr "" -#~ msgid "Start a Ray-based Flower simulation server." -#~ msgstr "Simulation de moniteur" +#~ msgid "Simulation examples" +#~ msgstr "Exemples de PyTorch" #~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "`str` argument called `cid`. It should" -#~ " return a single client instance of" -#~ " type ClientLike. Note that the " -#~ "created client instances are ephemeral " -#~ "and will often be destroyed after " -#~ "a single method invocation. Since client" -#~ " instances are not long-lived, they" -#~ " should not attempt to carry state" -#~ " over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset, hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." +#~ "A few ready-to-run complete " +#~ "examples for Flower simulation in " +#~ "Tensorflow/Keras and PyTorch are provided " +#~ "in the `Flower repository " +#~ "`_. You can run " +#~ "them on Google Colab too:" #~ msgstr "" -#~ "Une fonction créant des instances de " -#~ "client. La fonction doit prendre un " -#~ "seul argument `str` appelé `cid`. Elle" -#~ " doit retourner une seule instance de" -#~ " client de type ClientLike. Notez que" -#~ " les instances de client créées sont" -#~ " éphémères et seront souvent détruites " -#~ "après une seule invocation de méthode." -#~ " Puisque les instances de client ne" -#~ " sont pas de longue durée, elles " -#~ "ne doivent pas essayer de transporter" -#~ " l'état sur les invocations de " -#~ "méthode. Tout état requis par l'instance" -#~ " (modèle, jeu de données, hyperparamètres," -#~ " ...) doit être (re)créé dans l'appel" -#~ " à `client_fn` ou dans l'appel à " -#~ "n'importe quelle méthode de client (par" -#~ " exemple, charger les données d'évaluation" -#~ " dans la méthode `evaluate` elle-" -#~ "même)." #~ msgid "" -#~ "The total number of clients in " -#~ "this simulation. This must be set " -#~ "if `clients_ids` is not set and " -#~ "vice-versa." +#~ "`Tensorflow/Keras Simulation " +#~ "`_: 100 clients collaboratively " +#~ "train a MLP model on MNIST." #~ msgstr "" +#~ "`Quickstart TensorFlow (Code) " +#~ "`_" #~ msgid "" -#~ "List `client_id`s for each client. 
This" -#~ " is only required if `num_clients` is" -#~ " not set. Setting both `num_clients` " -#~ "and `clients_ids` with `len(clients_ids)` not" -#~ " equal to `num_clients` generates an " -#~ "error." +#~ "`PyTorch Simulation " +#~ "`_: 100 clients collaboratively train" +#~ " a CNN model on MNIST." #~ msgstr "" +#~ "`Quickstart PyTorch (Code) " +#~ "`_" #~ msgid "" -#~ "CPU and GPU resources for a single" -#~ " client. Supported keys are `num_cpus` " -#~ "and `num_gpus`. Example: `{\"num_cpus\": 4," -#~ " \"num_gpus\": 1}`. To understand the " -#~ "GPU utilization caused by `num_gpus`, " -#~ "consult the Ray documentation on GPU " -#~ "support." +#~ "Flower's :code:`VirtualClientEngine` allows you " +#~ "to run FL simulations across multiple" +#~ " compute nodes. Before starting your " +#~ "multi-node simulation ensure that you:" +#~ msgstr "" + +#~ msgid "Have the same Python environment in all nodes." +#~ msgstr "" + +#~ msgid "Have a copy of your code (e.g. your entire repo) in all nodes." #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Server`. If no instance" -#~ " is provided, then `start_server` will " -#~ "create one." +#~ "Have a copy of your dataset in " +#~ "all nodes (more about this in " +#~ ":ref:`simulation considerations `)" #~ msgstr "" #~ msgid "" -#~ "Currently supported values are `num_rounds`" -#~ " (int, default: 1) and `round_timeout` " -#~ "in seconds (float, default: None)." +#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " +#~ "`start_simulation `_ so the " +#~ ":code:`VirtualClientEngine` attaches to a " +#~ "running Ray instance." #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Strategy`. If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ "Start Ray on you head node: on " +#~ "the terminal type :code:`ray start " +#~ "--head`. This command will print a " +#~ "few lines, one of which indicates " +#~ "how to attach other nodes to the" +#~ " head node." #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_simulation` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example :code:`ray start " +#~ "--address='192.168.1.132:6379'`" #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args: { " -#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " -#~ "False } An empty dictionary can " -#~ "be used (ray_init_args={}) to prevent " -#~ "any arguments from being passed to " -#~ "ray.init." +#~ "With all the above done, you can" +#~ " run your code from the head " +#~ "node as you would if the " +#~ "simulation was running on a single " +#~ "node." #~ msgstr "" #~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. 
If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args:" +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " :code:`ray stop` in each node's " +#~ "terminal (including the head node)." #~ msgstr "" -#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgid "Multi-node simulation good-to-know" #~ msgstr "" #~ msgid "" -#~ "An empty dictionary can be used " -#~ "(ray_init_args={}) to prevent any arguments" -#~ " from being passed to ray.init." +#~ "Here we list a few interesting " +#~ "functionality when running multi-node FL" +#~ " simulations:" #~ msgstr "" #~ msgid "" -#~ "Set to True to prevent `ray.shutdown()`" -#~ " in case `ray.is_initialized()=True`." +#~ "User :code:`ray status` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the :code:`VirtualClientEngine`." #~ msgstr "" -#~ msgid "**hist** -- Object containing metrics from training." +#~ msgid "" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the :code:`VirtualClientEngine` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any :code:`ray " +#~ "start` command (including when starting " +#~ "the head)" #~ msgstr "" -#~ msgid "Flower server." -#~ msgstr "Serveur de Flower" - -#~ msgid "Start a Flower server using the gRPC transport layer." -#~ msgstr "" +#~ msgid "Considerations for simulations" +#~ msgstr "Simulation de moniteur" -#~ msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#~ msgid "" +#~ "We are actively working on these " +#~ "fronts so to make it trivial to" +#~ " run any FL workload with Flower " +#~ "simulation." #~ msgstr "" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_server` will create one." +#~ "The current VCE allows you to run" +#~ " Federated Learning workloads in simulation" +#~ " mode whether you are prototyping " +#~ "simple scenarios on your personal laptop" +#~ " or you want to train a complex" +#~ " FL pipeline across multiple high-" +#~ "performance GPU nodes. While we add " +#~ "more capabilities to the VCE, the " +#~ "points below highlight some of the " +#~ "considerations to keep in mind when " +#~ "designing your FL pipeline with Flower." +#~ " We also highlight a couple of " +#~ "current limitations in our implementation." #~ msgstr "" +#~ msgid "GPU resources" +#~ msgstr "Ressources" + #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.strategy.Strategy`. If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key :code:`num_gpus` in " +#~ ":code:`client_resources`. This being said, Ray" +#~ " (used internally by the VCE) is " +#~ "by default:" #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. 
If no " -#~ "implementation is provided, then " -#~ "`start_server` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set :code:`num_gpus=0.5` and" +#~ " you have two GPUs in your " +#~ "system with different (e.g. 32GB and " +#~ "8GB) VRAM amounts, they both would " +#~ "run 2 clients concurrently." #~ msgstr "" #~ msgid "" -#~ "The maximum length of gRPC messages " -#~ "that can be exchanged with the " -#~ "Flower clients. The default should be" -#~ " sufficient for most models. Users " -#~ "who train very large models might " -#~ "need to increase this value. Note " -#~ "that the Flower clients need to be" -#~ " started with the same value (see " -#~ "`flwr.client.start_client`), otherwise clients will" -#~ " not know about the increased limit" -#~ " and block larger messages." +#~ "not aware of other unrelated (i.e. " +#~ "not created by the VCE) workloads " +#~ "are running on the GPU. Two " +#~ "takeaways from this are:" #~ msgstr "" #~ msgid "" -#~ "Tuple containing root certificate, server " -#~ "certificate, and private key to start" -#~ " a secure SSL-enabled server. The " -#~ "tuple is expected to have three " -#~ "bytes elements in the following order:" -#~ " * CA certificate. * server " -#~ "certificate. * server private key." +#~ "Your Flower server might need a " +#~ "GPU to evaluate the `global model` " +#~ "after aggregation (by instance when " +#~ "making use of the `evaluate method " +#~ "`_)" #~ msgstr "" #~ msgid "" -#~ "Tuple containing root certificate, server " -#~ "certificate, and private key to start" -#~ " a secure SSL-enabled server. The " -#~ "tuple is expected to have three " -#~ "bytes elements in the following order:" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" +#~ " your experiment." #~ msgstr "" -#~ msgid "CA certificate." -#~ msgstr "Certificats" - -#~ msgid "server certificate." -#~ msgstr "Certificats" - -#~ msgid "server private key." -#~ msgstr "stratégie.du.serveur" - -#~ msgid "**hist** -- Object containing training and evaluation metrics." +#~ msgid "" +#~ "In addition, the GPU resource limits " +#~ "passed to :code:`client_resources` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." #~ msgstr "" -#~ msgid "Starting an insecure server:" -#~ msgstr "Démarrer le serveur" - -#~ msgid "Starting an SSL-enabled server:" -#~ msgstr "Démarrer le serveur" +#~ msgid "TensorFlow with GPUs" +#~ msgstr "Exemples de TensorFlow" -#~ msgid "Contains the strategy abstraction and different implementations." +#~ msgid "" +#~ "When `using a GPU with TensorFlow " +#~ "`_ nearly your " +#~ "entire GPU memory of all your GPUs" +#~ " visible to the process will be " +#~ "mapped. This is done by TensorFlow " +#~ "for optimization purposes. However, in " +#~ "settings such as FL simulations where" +#~ " we want to split the GPU into" +#~ " multiple `virtual` clients, this is " +#~ "not a desirable mechanism. Luckily we" +#~ " can disable this default behavior by" +#~ " `enabling memory growth " +#~ "`_." #~ msgstr "" -#~ msgid "Abstract base class for server strategy implementations." 
+#~ msgid "" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of :code:`actor_kwargs` we can " +#~ "pass the reserved key `\"on_actor_init_fn\"`" +#~ " in order to specify a function " +#~ "to be executed upon actor " +#~ "initialization. In this case, to enable" +#~ " GPU growth for TF workloads. It " +#~ "would look as follows:" #~ msgstr "" -#~ msgid "The current round of federated learning." -#~ msgstr "Qu'est-ce que l'apprentissage fédéré ?" - #~ msgid "" -#~ "Successful updates from the previously " -#~ "selected and configured clients. Each " -#~ "pair of `(ClientProxy, FitRes` constitutes " -#~ "a successful update from one of " -#~ "the previously selected clients. Not " -#~ "that not all previously selected clients" -#~ " are necessarily included in this " -#~ "list: a client might drop out and" -#~ " not submit a result. For each " -#~ "client that did not submit an " -#~ "update, there should be an `Exception`" -#~ " in `failures`." +#~ "This is precisely the mechanism used " +#~ "in `Tensorflow/Keras Simulation " +#~ "`_ example." +#~ msgstr "" +#~ "`Quickstart TensorFlow (Code) " +#~ "`_" + +#~ msgid "Multi-node setups" #~ msgstr "" #~ msgid "" -#~ "Exceptions that occurred while the " -#~ "server was waiting for client updates." +#~ "The VCE does not currently offer a" +#~ " way to control on which node a" +#~ " particular `virtual` client is executed." +#~ " In other words, if more than a" +#~ " single node have the resources " +#~ "needed by a client to run, then" +#~ " any of those nodes could get " +#~ "the client workload scheduled onto. " +#~ "Later in the FL process (i.e. in" +#~ " a different round) the same client" +#~ " could be executed by a different " +#~ "node. Depending on how your clients " +#~ "access their datasets, this might " +#~ "require either having a copy of " +#~ "all dataset partitions on all nodes " +#~ "or a dataset serving mechanism (e.g. " +#~ "using nfs, a database) to circumvent " +#~ "data duplication." #~ msgstr "" #~ msgid "" -#~ "**aggregation_result** -- The aggregated " -#~ "evaluation result. Aggregation typically uses" -#~ " some variant of a weighted average." +#~ "By definition virtual clients are " +#~ "`stateless` due to their ephemeral " +#~ "nature. A client state can be " +#~ "implemented as part of the Flower " +#~ "client class but users need to " +#~ "ensure this saved to persistent storage" +#~ " (e.g. a database, disk) and that " +#~ "can be retrieve later by the same" +#~ " client regardless on which node it" +#~ " is running from. This is related " +#~ "to the point above also since, in" +#~ " some way, the client's dataset could" +#~ " be seen as a type of `state`." #~ msgstr "" -#~ msgid "Aggregate training results." -#~ msgstr "Résultats globaux de l'évaluation." +#~ msgid "Save and load model checkpoints" +#~ msgstr "Sauvegarde et chargement des points de contrôle PyTorch" #~ msgid "" -#~ "Successful updates from the previously " -#~ "selected and configured clients. Each " -#~ "pair of `(ClientProxy, FitRes)` constitutes" -#~ " a successful update from one of " -#~ "the previously selected clients. Not " -#~ "that not all previously selected clients" -#~ " are necessarily included in this " -#~ "list: a client might drop out and" -#~ " not submit a result. For each " -#~ "client that did not submit an " -#~ "update, there should be an `Exception`" -#~ " in `failures`." 
+#~ "Flower does not automatically save model" +#~ " updates on the server-side. This " +#~ "how-to guide describes the steps " +#~ "to save (and load) model checkpoints " +#~ "in Flower." #~ msgstr "" -#~ msgid "" -#~ "**parameters** -- If parameters are " -#~ "returned, then the server will treat " -#~ "these as the new global model " -#~ "parameters (i.e., it will replace the" -#~ " previous parameters with the ones " -#~ "returned from this method). If `None`" -#~ " is returned (e.g., because there " -#~ "were only failures and no viable " -#~ "results) then the server will no " -#~ "update the previous model parameters, " -#~ "the updates received in this round " -#~ "are discarded, and the global model " -#~ "parameters remain the same." +#~ msgid "Legacy example guides" #~ msgstr "" -#~ msgid "Configure the next round of evaluation." -#~ msgstr "Configuration de l'évaluation côté serveur" +#~ msgid "Contributor tutorials" +#~ msgstr "Configuration du contributeur" -#~ msgid "The client manager which holds all currently connected clients." -#~ msgstr "" +#~ msgid "Contributor explanations" +#~ msgstr "Explications" -#~ msgid "" -#~ "**evaluate_configuration** -- A list of " -#~ "tuples. Each tuple in the list " -#~ "identifies a `ClientProxy` and the " -#~ "`EvaluateIns` for this particular " -#~ "`ClientProxy`. If a particular `ClientProxy`" -#~ " is not included in this list, " -#~ "it means that this `ClientProxy` will" -#~ " not participate in the next round" -#~ " of federated evaluation." -#~ msgstr "" +#~ msgid "Flower Framework Documentation" +#~ msgstr "Documentation de Flower" -#~ msgid "Configure the next round of training." -#~ msgstr "" +#~ msgid "PyTorch" +#~ msgstr "Exemples de PyTorch" -#~ msgid "" -#~ "**fit_configuration** -- A list of " -#~ "tuples. Each tuple in the list " -#~ "identifies a `ClientProxy` and the " -#~ "`FitIns` for this particular `ClientProxy`." -#~ " If a particular `ClientProxy` is not" -#~ " included in this list, it means " -#~ "that this `ClientProxy` will not " -#~ "participate in the next round of " -#~ "federated learning." -#~ msgstr "" +#~ msgid "TensorFlow" +#~ msgstr "TensorFlow" -#~ msgid "Evaluate the current model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "Flower CLI reference" +#~ msgstr "Client de Flower" -#~ msgid "" -#~ "This function can be used to " -#~ "perform centralized (i.e., server-side) " -#~ "evaluation of model parameters." -#~ msgstr "" +#~ msgid "flwr (Python API reference)" +#~ msgstr "Référence pour l'API" + +#~ msgid "Unreleased" +#~ msgstr "Inédit" + +#~ msgid "**Deprecate Python 3.7**" +#~ msgstr "**Deprecate Python 3.7**" #~ msgid "" -#~ "**evaluation_result** -- The evaluation " -#~ "result, usually a Tuple containing loss" -#~ " and a dictionary containing task-" -#~ "specific metrics (e.g., accuracy)." +#~ "Since Python 3.7 reached its end " +#~ "of life (EOL) on 2023-06-27, support " +#~ "for Python 3.7 is now deprecated " +#~ "and will be removed in an upcoming" +#~ " release." #~ msgstr "" - -#~ msgid "Initialize the (global) model parameters." -#~ msgstr "Initialise le modèle global" +#~ "Étant donné que Python 3.7 a " +#~ "atteint sa fin de vie (EOL) le " +#~ "2023-06-27, la prise en charge de " +#~ "Python 3.7 est désormais dépréciée et" +#~ " sera supprimée dans une prochaine " +#~ "version." 
#~ msgid "" -#~ "**parameters** -- If parameters are " -#~ "returned, then the server will treat " -#~ "these as the initial global model " -#~ "parameters." +#~ "**Add new** `FedTrimmedAvg` **strategy** " +#~ "([#1769](https://github.com/adap/flower/pull/1769), " +#~ "[#1853](https://github.com/adap/flower/pull/1853))" #~ msgstr "" +#~ "**Ajouter un nouveau** `FedTrimmedAvg` " +#~ "**stratégie** " +#~ "([#1769](https://github.com/adap/flower/pull/1769), " +#~ "[#1853](https://github.com/adap/flower/pull/1853))" -#~ msgid "Configurable FedAvg strategy implementation." -#~ msgstr "Configuration de l'évaluation fédérée" - -#~ msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#~ msgid "" +#~ "The new `FedTrimmedAvg` strategy implements" +#~ " Trimmed Mean by [Dong Yin, " +#~ "2018](https://arxiv.org/abs/1803.01498)" #~ msgstr "" +#~ "La nouvelle stratégie `FedTrimmedAvg` met " +#~ "en œuvre la moyenne trimmée par " +#~ "[Dong Yin, 2018](https://arxiv.org/abs/1803.01498)" #~ msgid "" -#~ "Fraction of clients used during " -#~ "training. In case `min_fit_clients` is " -#~ "larger than `fraction_fit * " -#~ "available_clients`, `min_fit_clients` will still " -#~ "be sampled. Defaults to 1.0." +#~ "**Add parameter aggregation to** `mt-" +#~ "pytorch` **code example** " +#~ "([#1785](https://github.com/adap/flower/pull/1785))" #~ msgstr "" +#~ "**Ajouter l'agrégation des paramètres à** " +#~ "`mt-pytorch` **exemple de code** " +#~ "([#1785](https://github.com/adap/flower/pull/1785))" #~ msgid "" -#~ "Fraction of clients used during " -#~ "validation. In case `min_evaluate_clients` is" -#~ " larger than `fraction_evaluate * " -#~ "available_clients`, `min_evaluate_clients` will " -#~ "still be sampled. Defaults to 1.0." +#~ "The `mt-pytorch` example shows how " +#~ "to aggregate parameters when writing a" +#~ " driver script. The included `driver.py`" +#~ " and `server.py` have been aligned to" +#~ " demonstrate both the low-level way" +#~ " and the high-level way of " +#~ "building server-side logic." #~ msgstr "" +#~ "L'exemple `mt-pytorch` montre comment " +#~ "agréger des paramètres lors de " +#~ "l'écriture d'un script de pilote. Les" +#~ " fichiers `driver.py` et `server.py` inclus" +#~ " ont été alignés pour démontrer à " +#~ "la fois la manière de bas niveau" +#~ " et la manière de haut niveau " +#~ "de construire la logique côté serveur." -#~ msgid "Minimum number of clients used during training. Defaults to 2." +#~ msgid "" +#~ "**Introduce (experimental) gRPC request-" +#~ "response API** " +#~ "([#1867](https://github.com/adap/flower/pull/1867), " +#~ "[#1901](https://github.com/adap/flower/pull/1901))" #~ msgstr "" +#~ "**Introduire l'API demande-réponse gRPC " +#~ "(expérimentale)** " +#~ "([#1867](https://github.com/adap/flower/pull/1867), " +#~ "[#1901](https://github.com/adap/flower/pull/1901))" -#~ msgid "Minimum number of clients used during validation. Defaults to 2." +#~ msgid "" +#~ "In addition to the existing gRPC " +#~ "API (based on bidirectional streaming) " +#~ "and the experimental REST API, there " +#~ "is now a new gRPC API that " +#~ "uses a request-response model to " +#~ "communicate with client nodes." #~ msgstr "" +#~ "En plus de l'API gRPC existante " +#~ "(basée sur un flux bidirectionnel) et" +#~ " de l'API REST expérimentale, il " +#~ "existe désormais une nouvelle API gRPC" +#~ " qui utilise un modèle demande-" +#~ "réponse pour communiquer avec les nœuds" +#~ " clients." -#~ msgid "Minimum number of total clients in the system. 
Defaults to 2." +#~ msgid "" +#~ "Please note: The gRPC request-response" +#~ " API is still experimental and will" +#~ " likely change significantly over time." #~ msgstr "" +#~ "Remarque : l'API requête-réponse gRPC" +#~ " est encore expérimentale et est " +#~ "susceptible de changer de manière " +#~ "significative au fil du temps." -#~ msgid "Optional function used for validation. Defaults to None." +#~ msgid "" +#~ "**Replace the eperimental** " +#~ "`start_client(rest=True)` **with the new** " +#~ "`start_client(transport=\"rest\")` " +#~ "([#1880](https://github.com/adap/flower/pull/1880))" #~ msgstr "" +#~ "**Remplacez le fichier expérimental** " +#~ "`start_client(rest=True) **par le nouveau** " +#~ "`start_client(transport=\"rest\")` " +#~ "([#1880](https://github.com/adap/flower/pull/1880))" -#~ msgid "Function used to configure training. Defaults to None." +#~ msgid "" +#~ "The (experimental) `start_client` argument " +#~ "`rest` was deprecated in favor of " +#~ "a new argument `transport`. " +#~ "`start_client(transport=\"rest\")` will yield the" +#~ " same behaviour as `start_client(rest=True)` " +#~ "did before. All code should migrate " +#~ "to the new argument `transport`. The " +#~ "deprecated argument `rest` will be " +#~ "removed in a future release." #~ msgstr "" -#~ msgid "Function used to configure validation. Defaults to None." +#~ msgid "" +#~ "**Migrate experimental REST API to " +#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" #~ msgstr "" +#~ "**Migrer l'API REST expérimentale vers " +#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" -#~ msgid "Whether or not accept rounds containing failures. Defaults to True." +#~ msgid "" +#~ "The (experimental) REST API used to " +#~ "be implemented in " +#~ "[FastAPI](https://fastapi.tiangolo.com/), but it has" +#~ " now been migrated to use " +#~ "[Starlette](https://www.starlette.io/) directly." #~ msgstr "" +#~ "L'API REST (expérimentale) était auparavant" +#~ " implémentée dans " +#~ "[FastAPI](https://fastapi.tiangolo.com/), mais elle " +#~ "a maintenant été migrée pour utiliser" +#~ " directement [Starlette](https://www.starlette.io/)." -#~ msgid "Initial global model parameters." -#~ msgstr "Initialise le modèle global" - -#~ msgid "Metrics aggregation function, optional." +#~ msgid "" +#~ "**Add a new gRPC option** " +#~ "([#2197](https://github.com/adap/flower/pull/2197))" #~ msgstr "" +#~ "**Ajouter une nouvelle option gRPC** " +#~ "([#2197](https://github.com/adap/flower/pull/2197))" -#~ msgid "Aggregate evaluation losses using weighted average." -#~ msgstr "Résultats globaux de l'évaluation." - -#~ msgid "Aggregate fit results using weighted average." +#~ msgid "" +#~ "We now start a gRPC server with" +#~ " the `grpc.keepalive_permit_without_calls` option " +#~ "set to 0 by default. This prevents" +#~ " the clients from sending keepalive " +#~ "pings when there is no outstanding " +#~ "stream." #~ msgstr "" +#~ "Nous démarrons maintenant un serveur " +#~ "gRPC avec l'option " +#~ "`grpc.keepalive_permit_without_calls` réglée sur 0" +#~ " par défaut, ce qui empêche les " +#~ "clients d'envoyer des pings de maintien" +#~ " lorsqu'il n'y a pas de flux en" +#~ " attente." -#~ msgid "Evaluate model parameters using an evaluation function." 
+#~ msgid "" +#~ "**General improvements** " +#~ "([#1872](https://github.com/adap/flower/pull/1872), " +#~ "[#1866](https://github.com/adap/flower/pull/1866), " +#~ "[#1884](https://github.com/adap/flower/pull/1884))" #~ msgstr "" +#~ "**Mettre à jour les exemples de " +#~ "code** ([#1291](https://github.com/adap/flower/pull/1291), " +#~ "[#1286](https://github.com/adap/flower/pull/1286), " +#~ "[#1282](https://github.com/adap/flower/pull/1282))" -#~ msgid "Initialize global model parameters." -#~ msgstr "Initialise le modèle global" +#~ msgid "Example projects" +#~ msgstr "Exemples" -#~ msgid "Use a fraction of available clients for evaluation." +#~ msgid "" +#~ "`Flower simulation PyTorch " +#~ "`_" #~ msgstr "" +#~ "`Flower Quickstart (TensorFlow/Keras) " +#~ "`_" -#~ msgid "Return the sample size and the required number of available clients." +#~ msgid "" +#~ "`Android Kotlin example " +#~ "`_" #~ msgstr "" -#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgid "`Android Java example `_" #~ msgstr "" -#~ msgid "Federated Averaging with Momentum strategy." -#~ msgstr "Stratégie de moyenne fédérée." +#~ msgid "Build a strategy from scratch" +#~ msgstr "Élaborer une stratégie à partir de zéro" -#~ msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" -#~ msgstr "" +#~ msgid "Customize the client" +#~ msgstr "Création du client IMDBC" -#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgid "Get started with Flower" #~ msgstr "" -#~ msgid "Fraction of clients used during validation. Defaults to 0.1." -#~ msgstr "" +#~ msgid "Quickstart Android" +#~ msgstr "Démarrage rapide d'Android" #~ msgid "" -#~ "Server-side learning rate used in " -#~ "server-side optimization. Defaults to 1.0." -#~ msgstr "" - -#~ msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#~ "Let's build a federated learning system" +#~ " using TFLite and Flower on Android!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant TFLite et Flower sur" +#~ " Android !" -#~ msgid "Configurable QFedAvg strategy implementation." +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example " +#~ "`_ to" +#~ " learn more." #~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet " +#~ "`_ " +#~ "pour en savoir plus." -#~ msgid "Configurable fault-tolerant FedAvg strategy implementation." -#~ msgstr "" +#~ msgid "Quickstart iOS" +#~ msgstr "Démarrage rapide iOS" -#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Neural Network on " +#~ "MNIST using Flower and CoreML on " +#~ "iOS devices." #~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un réseau neuronal sur " +#~ "MNIST en utilisant Flower et CoreML " +#~ "sur les appareils iOS." -#~ msgid "Federated Optim strategy interface." +#~ msgid "" +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." #~ msgstr "" +#~ "Tout d'abord, pour l'exécution du " +#~ "serveur Flower Python, il est recommandé" +#~ " de créer un environnement virtuel et" +#~ " de tout exécuter au sein d'un " +#~ "`virtualenv `_. Pour l'implémentation du client" +#~ " Flower dans iOS, il est recommandé" +#~ " d'utiliser Xcode comme notre IDE." 
-#~ msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#~ msgid "" +#~ "Our example consists of one Python " +#~ "*server* and two iPhone *clients* that" +#~ " all have the same model." #~ msgstr "" -#~ "FedYogi - Stratégie d'apprentissage fédéré " -#~ "utilisant Yogi côté serveur. Mise en " -#~ "oeuvre basée sur https://arxiv.org/abs/2003.00295" +#~ "Notre exemple se compose d'un *serveur*" +#~ " Python et de deux *clients* iPhone" +#~ " qui ont tous le même modèle." -#~ msgid "Fraction of clients used during training. Defaults to 1.0." +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." #~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour de poids " +#~ "individuelles pour le modèle en fonction" +#~ " de leurs ensembles de données " +#~ "locaux. Ces mises à jour sont " +#~ "ensuite envoyées au *serveur* qui les" +#~ " agrège pour produire un meilleur " +#~ "modèle. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour de poids s'appelle un" +#~ " *round*." -#~ msgid "Fraction of clients used during validation. Defaults to 1.0." +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started to setup our Flower server " +#~ "environment. We first need to install" +#~ " Flower. You can do this by " +#~ "using pip:" #~ msgstr "" +#~ "Maintenant que nous avons une idée " +#~ "approximative de ce qui se passe, " +#~ "commençons à configurer notre environnement" +#~ " de serveur Flower. Nous devons " +#~ "d'abord installer Flower, ce que tu " +#~ "peux faire à l'aide de pip :" -#~ msgid "Server-side learning rate. Defaults to 1e-1." -#~ msgstr "" +#~ msgid "Or Poetry:" +#~ msgstr "Ou de la poésie :" -#~ msgid "Client-side learning rate. Defaults to 1e-1." +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training using CoreML " +#~ "as our local training pipeline and " +#~ "MNIST as our dataset. For simplicity " +#~ "reasons we will use the complete " +#~ "Flower client with CoreML, that has " +#~ "been implemented and stored inside the" +#~ " Swift SDK. The client implementation " +#~ "can be seen below:" #~ msgstr "" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, exécutons une simple " +#~ "formation distribuée en utilisant CoreML " +#~ "comme pipeline de formation local et " +#~ "MNIST comme ensemble de données. Pour" +#~ " des raisons de simplicité, nous " +#~ "utiliserons le client Flower complet " +#~ "avec CoreML, qui a été mis en " +#~ "œuvre et stocké à l'intérieur du " +#~ "SDK Swift. La mise en œuvre du " +#~ "client peut être vue ci-dessous :" -#~ msgid "Momentum parameter. Defaults to 0.0." +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. 
Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." #~ msgstr "" +#~ "Créons un nouveau projet d'application " +#~ "dans Xcode et ajoutons :code:`flwr` " +#~ "comme dépendance dans ton projet. Pour" +#~ " notre application, nous stockerons la " +#~ "logique de notre application dans " +#~ ":code:`FLiOSModel.swift` et les éléments de" +#~ " l'interface utilisateur dans " +#~ ":code:`ContentView.swift`.Nous nous concentrerons " +#~ "davantage sur :code:`FLiOSModel.swift` dans ce" +#~ " quickstart. N'hésite pas à te " +#~ "référer à l'`exemple de code complet " +#~ "`_ pour" +#~ " en savoir plus sur l'application." -#~ msgid "Second moment parameter. Defaults to 0.0." +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" #~ msgstr "" +#~ "Importe les paquets liés à Flower " +#~ "et CoreML dans :code:`FLiOSModel.swift` :" -#~ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." #~ msgstr "" +#~ "Ensuite, ajoute le mlmodel au projet " +#~ "simplement par glisser-déposer, le " +#~ "mlmodel sera regroupé à l'intérieur de" +#~ " l'application lors du déploiement sur " +#~ "ton appareil iOS. Nous devons passer " +#~ "l'url pour accéder au mlmodel et " +#~ "exécuter les processus d'apprentissage " +#~ "automatique CoreML, elle peut être " +#~ "récupérée en appelant la fonction " +#~ ":code:`Bundle.main.url`. Pour l'ensemble de " +#~ "données MNIST, nous devons le prétraiter" +#~ " dans l'objet :code:`MLBatchProvider`. Le " +#~ "prétraitement est effectué à l'intérieur " +#~ "de :code:`DataLoader.swift`." -#~ msgid "Configurable FedProx strategy implementation." +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" -#~ msgid "Federated Optimization strategy." -#~ msgstr "Stratégie de moyenne fédérée." +#~ msgid "" +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." +#~ msgstr "" -#~ msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." #~ msgstr "" +#~ "Lance ensuite le client Flower gRPC " +#~ "et commence à communiquer avec le " +#~ "serveur en passant notre client Flower" +#~ " à la fonction :code:`startFlwrGRPC`." #~ msgid "" -#~ "The strategy in itself will not be" -#~ " different than FedAvg, the client " -#~ "needs to be adjusted. 
A proximal " -#~ "term needs to be added to the " -#~ "loss function during the training:" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." #~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ "d'appeler le :code:`MLFlwrClient` fourni et" +#~ " d'appeler :code:`startFlwrGRPC()`. L'attribut " +#~ ":code:`hostname` et :code:`port` indique au" +#~ " client à quel serveur se connecter." +#~ " Pour ce faire, il suffit d'entrer" +#~ " le nom d'hôte et le port dans" +#~ " l'application avant de cliquer sur " +#~ "le bouton de démarrage pour lancer " +#~ "le processus d'apprentissage fédéré." #~ msgid "" -#~ "\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -#~ "\n" -#~ msgstr "\\\\frac{\\Nmu}{2} || w - w^t ||^2" +#~ "Once the server is running we can" +#~ " start the clients in different " +#~ "terminals. Build and run the client " +#~ "through your Xcode, one through Xcode" +#~ " Simulator and the other by deploying" +#~ " it to your iPhone. To see more" +#~ " about how to deploy your app " +#~ "to iPhone or Simulator visit `here " +#~ "`_." +#~ msgstr "" +#~ "Une fois que le serveur fonctionne, " +#~ "nous pouvons démarrer les clients dans" +#~ " différents terminaux. Construis et exécute" +#~ " le client grâce à ton Xcode, " +#~ "l'un via le simulateur Xcode et " +#~ "l'autre en le déployant sur ton " +#~ "iPhone. Pour en savoir plus sur la" +#~ " façon de déployer ton application " +#~ "sur l'iPhone ou le simulateur, visite" +#~ " `ici `_." #~ msgid "" -#~ "Where $w^t$ are the global parameters" -#~ " and $w$ are the local weights " -#~ "the function will be optimized with." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." #~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré " +#~ "dans ton appareil ios. Le `code " +#~ "source complet " +#~ "`_ de " +#~ "cet exemple se trouve dans " +#~ ":code:`examples/ios`." -#~ msgid "In PyTorch, for example, the loss would go from:" +#~ msgid "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the open-source Flower community on " +#~ "Slack to connect, ask questions, and " +#~ "get help: `Join Slack `__ 🌼 We'd love to hear" +#~ " from you in the ``#introductions`` " +#~ "channel! And if anything is unclear, " +#~ "head over to the ``#questions`` channel." #~ msgstr "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ et rejoignez" +#~ " la communauté open-source Flower sur" +#~ " Slack pour vous connecter, poser des" +#~ " questions et obtenir de l'aide : " +#~ "`Join Slack `__ " +#~ "🌼 Nous serions ravis d'avoir de " +#~ "vos nouvelles dans le canal " +#~ "``#introductions`` ! Et si quelque chose" +#~ " n'est pas clair, dirigez-vous vers" +#~ " le canal ``#questions``." 
-#~ msgid "To:" +#~ msgid "|bd48315a61c14495babefe3c7918b493|" #~ msgstr "" -#~ msgid "" -#~ "With `global_params` being a copy of " -#~ "the parameters before the training takes" -#~ " place." +#~ msgid "|c00d9e5b0d324d96b86da8a78b05b14b|" #~ msgstr "" -#~ msgid "" -#~ "The weight of the proximal term " -#~ "used in the optimization. 0.0 makes " -#~ "this strategy equivalent to FedAvg, and" -#~ " the higher the coefficient, the more" -#~ " regularization will be used (that " -#~ "is, the client parameters will need " -#~ "to be closer to the server " -#~ "parameters during training)." +#~ msgid "|faae2ee10f4149c9907563c4f48ec6ea|" #~ msgstr "" -#~ msgid "Sends the proximal factor mu to the clients" +#~ msgid "|13a655510351455292f145a61d6c15d6|" #~ msgstr "" -#~ msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#~ msgid "|13949884182846e3a91433190a936ba9|" #~ msgstr "" -#~ "Stratégie FedAdagrad - Optimisation fédérée" -#~ " adaptative à l'aide d'Adagrad." -#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgid "|9bf26cc650b146e88b4745df040ece37|" #~ msgstr "" -#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgid "|1590915480fc41708bd43e48af9582f9|" #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant fastai et Flower !" -#~ msgid "FedAdam - Adaptive Federated Optimization using Adam." -#~ msgstr "FedAdam - Optimisation fédérée adaptative utilisant Adam." - -#~ msgid "Momentum parameter. Defaults to 0.9." +#~ msgid "|e5ee96d702b64256b97b8ca99db10787|" #~ msgstr "" -#~ msgid "Second moment parameter. Defaults to 0.99." +#~ msgid "|84840b244edd47c481278ce534c126cd|" #~ msgstr "" -#~ msgid "FedYogi [Reddi et al., 2020] strategy." -#~ msgstr "Stratégie FedYogi [Reddi et al., 2020]." - -#~ msgid "Adaptive Federated Optimization using Yogi." -#~ msgstr "Optimisation fédérée adaptative à l'aide de Yogi." - -#~ msgid "Federated learning strategy using Yogi on server-side." -#~ msgstr "L'apprentissage fédéré en cinq étapes" - -#~ msgid "Differential Privacy Wrappers in Flower" -#~ msgstr "Les enveloppes différentielles de confidentialité dans les fleurs" - -#~ msgid "Evaluation" -#~ msgstr "Solution" - -#~ msgid "Code examples" -#~ msgstr "Exemple de code complet" - -#~ msgid "" -#~ "Flower Quickstart (PyTorch): coming soon " -#~ "(the TensorFlow/Keras example can easily " -#~ "be changed to make it work with" -#~ " PyTorch)" +#~ msgid "|f33f5ebb3a844a2ba54bb6be3571b172|" #~ msgstr "" -#~ msgid "First time contributors" -#~ msgstr "Bonnes premières contributions" - -#~ msgid "First MXNet 1.6 example (MNIST)" +#~ msgid "|5645db4ba9c945518d51ff234f35c797|" #~ msgstr "" -#~ msgid "ImageNet (PyTorch/TensorFlow)" +#~ msgid "|317af8d28fcc479ab981047d058c4751|" #~ msgstr "" -#~ msgid "LSTM (PyTorch/TensorFlow)" +#~ msgid "|8bfd0e697a494d5385662debafade6bf|" #~ msgstr "" -#~ msgid "Transformer (PyTorch/TensorFlow)" +#~ msgid "" +#~ "Differential privacy (DP) is often " +#~ "mentioned in the context of Federated" +#~ " Learning. It is a privacy-preserving" +#~ " method used when analyzing and " +#~ "sharing statistical data, ensuring the " +#~ "privacy of individual participants. DP " +#~ "achieves this by adding statistical " +#~ "noise to the model updates, ensuring " +#~ "any individual participants’ information " +#~ "cannot be distinguished or re-" +#~ "identified. This technique can be " +#~ "considered an optimization that provides " +#~ "a quantifiable privacy protection measure." 
#~ msgstr "" +#~ "La confidentialité différentielle (DP) est " +#~ "souvent mentionnée dans le contexte de" +#~ " l'apprentissage fédéré. Il s'agit d'une" +#~ " méthode de préservation de la vie" +#~ " privée utilisée lors de l'analyse et" +#~ " du partage de données statistiques, " +#~ "garantissant la confidentialité des " +#~ "participants individuels. La DP y " +#~ "parvient en ajoutant un bruit " +#~ "statistique aux mises à jour du " +#~ "modèle, garantissant que toute information " +#~ "sur les participants individuels ne peut" +#~ " être distinguée ou réidentifiée. Cette " +#~ "technique peut être considérée comme une" +#~ " optimisation qui fournit une mesure " +#~ "quantifiable de protection de la vie " +#~ "privée." -#~ msgid "BERT (PyTorch/TensorFlow)" +#~ msgid "|e5dc001d27ad460caeab669e957b3c36|" #~ msgstr "" -#~ msgid "Logging" -#~ msgstr "Enregistrement" - -#~ msgid "|cce04c6f539b421a91f5dba40287193f|" -#~ msgstr "|cce04c6f539b421a91f5dba40287193f|" - -#~ msgid "|e392aef42ba248e19e35446f95a6d1ca|" -#~ msgstr "|e392aef42ba248e19e35446f95a6d1ca|" - -#~ msgid "|7e028f44defe4f31a02debc729f2010d|" -#~ msgstr "|7e028f44defe4f31a02debc729f2010d|" - -#~ msgid "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" -#~ msgstr "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" - -#~ msgid "|9c0445ce962744e1a1c0a4abc697a334|" -#~ msgstr "|9c0445ce962744e1a1c0a4abc697a334|" - -#~ msgid "|a3246766a6db412888131b3bcdad0971|" -#~ msgstr "|a3246766a6db412888131b3bcdad0971|" - -#~ msgid "|db6f2bee32f143b8a5085b6a8ce1acd1|" -#~ msgstr "|db6f2bee32f143b8a5085b6a8ce1acd1|" - -#~ msgid "|405653bc8f874e9595fd59cc82b3d48c|" -#~ msgstr "|405653bc8f874e9595fd59cc82b3d48c|" - -#~ msgid "|073a728154ed406e8fe54e1d9f18dcb9|" -#~ msgstr "|073a728154ed406e8fe54e1d9f18dcb9|" - -#~ msgid "|50e80ea4f22945848b65ed7eed35e0e1|" -#~ msgstr "|50e80ea4f22945848b65ed7eed35e0e1|" - -#~ msgid "|f3cf9148d85e4b68b66b6c255b25e327|" -#~ msgstr "|f3cf9148d85e4b68b66b6c255b25e327|" - -#~ msgid "|1fedb4f8714947e1b13f03696180c741|" -#~ msgstr "|1fedb4f8714947e1b13f03696180c741|" - -#~ msgid "|a32d4ad1ccb34461942d75c7b2b51d65|" -#~ msgstr "|a32d4ad1ccb34461942d75c7b2b51d65|" - -#~ msgid "|3531696c52904cd3b9944034ab959d48|" -#~ msgstr "|3531696c52904cd3b9944034ab959d48|" - -#~ msgid "An Introduction to Federated Learning" -#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" - -#~ msgid "Strategies in Federated Learning" -#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" - -#~ msgid "Building a Strategy" -#~ msgstr "Stratégies intégrées" - -#~ msgid "Client and NumPyClient" -#~ msgstr "NumPyClient" - -#~ msgid "Strategies" -#~ msgstr "Stratégies personnalisées" - -#~ msgid "SSL-enabled Server and Client" +#~ msgid "API Reference - Flower binaries" #~ msgstr "" -#~ msgid "About these documents" -#~ msgstr "À propos de ces documents" - -#~ msgid "Index" -#~ msgstr "Index" - -#~ msgid "Search" -#~ msgstr "Recherche" - -#~ msgid "Copyright" -#~ msgstr "Droits d'auteur" - -#~ msgid "Save Progress" -#~ msgstr "" +#~ msgid "API Reference - flwr" +#~ msgstr "Référence pour l'API" #~ msgid "" -#~ "The Flower server does not prescribe " -#~ "a way to persist model updates or" -#~ " evaluation results. Flower does not " -#~ "(yet) automatically save model updates " -#~ "on the server-side. It's on the" -#~ " roadmap to provide a built-in " -#~ "way of doing this." +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. 
This feature" +#~ " is experimental, it might change " +#~ "considerably in future versions of " +#~ "Flower." #~ msgstr "" -#~ msgid "Release Process" -#~ msgstr "Publier Flower" - -#~ msgid "Virtual Env Installation" -#~ msgstr "Virtualenv avec Anaconda" - -#~ msgid "Install development versions" -#~ msgstr "Installer les versions de développement de Flower" - -#~ msgid "Set up a virtual env" -#~ msgstr "Mettre en place un environment virtuel" +#~ msgid "Returns a client's set of properties." +#~ msgstr "" #~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. This feature" +#~ " is experimental, it might be change" +#~ " considerably in future versions of " +#~ "Flower." #~ msgstr "" -#~ "Notez que, pour construire la " -#~ "documentation localement (avec ``poetry run" -#~ " make html``, comme décrit ci-" -#~ "dessous), ``Pandoc _`" -#~ " doit être installé sur le système." - -#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -#~ msgstr "Un fine-tuning de LLaMA 2 avec Hugging Face et PyTorch" - -#~ msgid "XGBoost" -#~ msgstr "XGBoost" -#~ msgid "Android ONNX on-device training" +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "str argument called `cid`. It should " +#~ "return a single client instance of " +#~ "type ClientLike. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset,hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" -#~ "Utiliser Android ONNX pour faire du " -#~ "training directement sur le téléphone" -#~ msgid "Contribute on GitHub" -#~ msgstr "Contribuer sur GitHub" +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "str argument called `cid`. It should " +#~ "return a single client instance of " +#~ "type ClientLike. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not" +#~ msgstr "" -#~ msgid "How to write a good PR title" -#~ msgstr "Comment écrire un bon titre de PR" +#~ msgid "attempt to carry state over method invocations. Any state required by" +#~ msgstr "" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "the instance (model, dataset,hyperparameters, " +#~ "...) should be (re-)created in either" +#~ " the call to `client_fn` or the " +#~ "call to any of the client methods" +#~ " (e.g., load evaluation data in the" +#~ " `evaluate` method itself)." 
#~ msgstr "" -#~ "Un titre de PR bien choisi permet" -#~ " aux autres développeurs de rapidement " -#~ "comprendre l'intérêt et le scope des " -#~ "changements proposés. Voici un guide " -#~ "pour vous aider à écrire des bons" -#~ " titres de PR :" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "\\frac{\\mu}{2} || w - w^t ||^2\n" +#~ "\n" #~ msgstr "" -#~ "1. Soyez clair et concis : Donnez" -#~ " un résumé clair des changements de" -#~ " manière concise. 1. Utilisez des " -#~ "verbes actionnables : Commencez par des" -#~ " verbes comme \"Add\", \"Update\", ou " -#~ "\"Fix\" pour indiquer le but. 1. " -#~ "Inclure des renseignements pertinents : " -#~ "Mentionner la caractéristique ou le " -#~ "module concerné pour le contexte. 1. " -#~ "Gardez le court : Évitez les longs" -#~ " titres pour une lisibilité facile. " -#~ "1. Utiliser une bonne capitalisation et" -#~ " une ponctuation : Suivre les règles" -#~ " de grammaire pour la clarté." #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ "Adaptive Federated Optimization using Adagrad" +#~ " (FedAdagrad) [Reddi et al., 2020] " +#~ "strategy." #~ msgstr "" -#~ "Commençons par quelques exemples de " -#~ "titres qui devraient être évités parce" -#~ " qu'ils ne fournissent pas d'information" -#~ " significative :" -#~ msgid "Implement Algorithm" -#~ msgstr "Implement Algorithm" - -#~ msgid "Database" -#~ msgstr "Base de données" - -#~ msgid "Add my_new_file.py to codebase" -#~ msgstr "Add my_new_file.py to codebase" +#~ msgid "" +#~ "Adaptive Federated Optimization using Adam " +#~ "(FedAdam) [Reddi et al., 2020] strategy." +#~ msgstr "" -#~ msgid "Improve code in module" -#~ msgstr "Improve code in module" +#~ msgid "" +#~ "Adaptive Federated Optimization using Yogi " +#~ "(FedYogi) [Reddi et al., 2020] strategy." +#~ msgstr "" -#~ msgid "Change SomeModule" -#~ msgstr "Change SomeModule" +#~ msgid "Contributing Baselines" +#~ msgstr "Configuration du contributeur" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ "Do you have a new federated " +#~ "learning paper and want to add a" +#~ " new baseline to Flower? Or do " +#~ "you want to add an experiment to" +#~ " an existing baseline paper? Great, " +#~ "we really appreciate your contribution." 
#~ msgstr "" -#~ "Voici quelques bons exemples qui " -#~ "fournissent de l'information utile sans " -#~ "répéter comment ils le font, comme " -#~ "cela est déjà visible dans la " -#~ "section \"Files changed\" de la PR " -#~ ":" - -#~ msgid "Update docs banner to mention Flower Summit 2023" -#~ msgstr "Update docs banner to mention Flower Summit 2023" -#~ msgid "Remove unnecessary XGBoost dependency" -#~ msgstr "Remove unnecessary XGBoost dependency" +#~ msgid "" +#~ "The goal of Flower Baselines is to" +#~ " reproduce experiments from popular papers" +#~ " to accelerate researchers by enabling " +#~ "faster comparisons to new strategies, " +#~ "datasets, models, and federated pipelines " +#~ "in general." +#~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" -#~ msgstr "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgid "" +#~ "Before you start to work on a " +#~ "new baseline or experiment, please check" +#~ " the `Flower Issues " +#~ "`_ or `Flower " +#~ "Pull Requests `_ " +#~ "to see if someone else is already" +#~ " working on it. Please open a " +#~ "new issue if you are planning to" +#~ " work on a new baseline or " +#~ "experiment with a short description of" +#~ " the corresponding paper and the " +#~ "experiment you want to contribute." +#~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "TL;DR: Adding a new Flower Baseline" #~ msgstr "" -#~ "Ajoute une tâche CI pour déployer " -#~ "le système de mise en scène " -#~ "lorsque la branche `main` change" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ "Let's say you want to contribute " +#~ "the code of your most recent " +#~ "Federated Learning publication, *FedAweseome*. " +#~ "There are only three steps necessary " +#~ "to create a new *FedAweseome* Flower " +#~ "Baseline:" #~ msgstr "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" -#~ msgid "Differential privacy" -#~ msgstr "Confidentialité différentielle" +#~ msgid "**Get the Flower source code on your machine**" +#~ msgstr "" #~ msgid "" -#~ "The Flower server does not prescribe " -#~ "a way to aggregate evaluation results," -#~ " but it enables the user to " -#~ "fully customize result aggregation." +#~ "Fork the Flower codebase: got to " +#~ "the `Flower GitHub repo " +#~ "`_ and fork the " +#~ "code (click the *Fork* button in " +#~ "the top-right corner and follow " +#~ "the instructions)" #~ msgstr "" -#~ msgid "Configure logging" -#~ msgstr "Configurer les clients" - #~ msgid "" -#~ "The Flower logger keeps track of " -#~ "all core events that take place in" -#~ " federated learning workloads. It presents" -#~ " information by default following a " -#~ "standard message format:" +#~ "Clone the (forked) Flower source code:" +#~ " :code:`git clone " +#~ "git@github.com:[your_github_username]/flower.git`" #~ msgstr "" -#~ "L'enregistreur de Flower garde la trace" -#~ " de tous les événements principaux " -#~ "qui ont lieu dans les charges de" -#~ " travail de l'apprentissage fédéré. Il " -#~ "présente les informations par défaut en" -#~ " suivant un format de message " -#~ "standard :" #~ msgid "" -#~ "containing relevant information including: log" -#~ " message level (e.g. :code:`INFO`, " -#~ ":code:`DEBUG`), a timestamp, the line " -#~ "where the logging took place from, " -#~ "as well as the log message itself." 
-#~ " In this way, the logger would " -#~ "typically display information on your " -#~ "terminal as follows:" +#~ "Open the code in your favorite " +#~ "editor (e.g., using VSCode: ``cd flower" +#~ " ; code .``)" #~ msgstr "" -#~ "contenant des informations pertinentes, " -#~ "notamment : le niveau du message " -#~ "de journal (par exemple :code:`INFO`, " -#~ ":code:`DEBUG`), un horodatage, la ligne " -#~ "à partir de laquelle l'enregistrement a" -#~ " eu lieu, ainsi que le message " -#~ "de journal lui-même. De cette " -#~ "façon, le logger afficherait typiquement " -#~ "des informations sur ton terminal comme" -#~ " suit :" -#~ msgid "Saving log to file" -#~ msgstr "Enregistrement du journal dans un fichier" +#~ msgid "**Add the FedAwesome code**" +#~ msgstr "" #~ msgid "" -#~ "By default, the Flower log is " -#~ "outputted to the terminal where you " -#~ "launch your Federated Learning workload " -#~ "from. This applies for both gRPC-" -#~ "based federation (i.e. when you do " -#~ ":code:`fl.server.start_server`) and when using " -#~ "the :code:`VirtualClientEngine` (i.e. when you" -#~ " do :code:`fl.simulation.start_simulation`). In " -#~ "some situations you might want to " -#~ "save this log to disk. You can " -#~ "do so by calling the " -#~ "`fl.common.logger.configure() " -#~ "`_" -#~ " function. For example:" +#~ "Add your :code:`FedAwesome` code under " +#~ ":code:`baselines/flwr_baselines/publications/[fedawesome]`" +#~ msgstr "" + +#~ msgid "Add a `pyproject.toml` with all necessary dependencies" #~ msgstr "" -#~ "Par défaut, le journal de Flower " -#~ "est affiché dans le terminal à " -#~ "partir duquel tu as lancé ta " -#~ "charge de travail d'apprentissage fédéré. " -#~ "Cela s'applique à la fois à la " -#~ "fédération basée sur gRPC (c'est-à-dire " -#~ "lorsque tu fais :code:`fl.server.start_server`) " -#~ "et à l'utilisation du " -#~ ":code:`VirtualClientEngine` (c'est-à-dire lorsque tu" -#~ " fais :code:`fl.simulation.start_simulation`). Dans " -#~ "certaines situations, tu peux vouloir " -#~ "sauvegarder ce journal sur le disque." -#~ " Tu peux le faire en appelant " -#~ "la fonction `fl.common.logger.configure() " -#~ "`_." -#~ " Par exemple :" -#~ msgid "" -#~ "With the above, Flower will record " -#~ "the log you see on your terminal" -#~ " to :code:`log.txt`. This file will " -#~ "be created in the same directory " -#~ "as were you are running the code" -#~ " from. If we inspect we see the" -#~ " log above is also recorded but " -#~ "prefixing with :code:`identifier` each line:" +#~ msgid "Add a `README.md` describing how to use your baseline" #~ msgstr "" -#~ "Avec ce qui précède, Flower enregistrera" -#~ " le journal que tu vois sur ton" -#~ " terminal dans :code:`log.txt`. Ce fichier" -#~ " sera créé dans le même répertoire" -#~ " que celui à partir duquel tu " -#~ "exécutes le code. Si nous inspectons," -#~ " nous voyons que le journal ci-" -#~ "dessus est également enregistré, mais en" -#~ " préfixant chaque ligne avec " -#~ ":code:`identifier` :" -#~ msgid "Log your own messages" -#~ msgstr "Enregistrer tes propres messages" +#~ msgid "**Open a pull request**" +#~ msgstr "" + +#~ msgid "Stage your changes: :code:`git add .`" +#~ msgstr "" #~ msgid "" -#~ "You might expand the information shown" -#~ " by default with the Flower logger" -#~ " by adding more messages relevant to" -#~ " your application. You can achieve " -#~ "this easily as follows." 
+#~ "Commit & push: :code:`git commit -m " +#~ "\"Create new FedAweseome baseline\" ; " +#~ "git push`" #~ msgstr "" -#~ "Tu peux élargir les informations " -#~ "affichées par défaut avec le logger " -#~ "Flower en ajoutant d'autres messages " -#~ "pertinents pour ton application. Tu peux" -#~ " y parvenir facilement en procédant " -#~ "comme suit." #~ msgid "" -#~ "In this way your logger will show," -#~ " in addition to the default messages," -#~ " the ones introduced by the clients" -#~ " as specified above." +#~ "Open a pull request: go to *your*" +#~ " fork of the Flower codebase and " +#~ "create a pull request that targets " +#~ "the Flower ``main``` branch" #~ msgstr "" -#~ "De cette façon, ton logger affichera," -#~ " en plus des messages par défaut, " -#~ "ceux introduits par les clients comme" -#~ " spécifié ci-dessus." -#~ msgid "Log to a remote service" -#~ msgstr "Se connecter à un service distant" +#~ msgid "Further reading:" +#~ msgstr "Aide supplémentaire" #~ msgid "" -#~ "The :code:`fl.common.logger.configure` function, " -#~ "also allows specifying a host to " -#~ "which logs can be pushed (via " -#~ ":code:`POST`) through a native Python " -#~ ":code:`logging.handler.HTTPHandler`. This is a " -#~ "particularly useful feature in " -#~ ":code:`gRPC`-based Federated Learning workloads " -#~ "where otherwise gathering logs from all" -#~ " entities (i.e. the server and the" -#~ " clients) might be cumbersome. Note " -#~ "that in Flower simulation, the server" -#~ " automatically displays all logs. You " -#~ "can still specify a :code:`HTTPHandler` " -#~ "should you wish to backup or " -#~ "analyze the logs somewhere else." +#~ "`GitHub docs: About forks " +#~ "`_" #~ msgstr "" -#~ "La fonction :code:`fl.common.logger.configure` " -#~ "permet également de spécifier un hôte" -#~ " vers lequel les journaux peuvent " -#~ "être envoyés (via :code:`POST`) par " -#~ "l'intermédiaire d'un :code:`logging.handler.HTTPHandler`" -#~ " natif de Python. Il s'agit d'une " -#~ "fonction particulièrement utile dans les " -#~ "charges de travail d'apprentissage fédéré " -#~ "basées sur :code:`gRPC` où la collecte" -#~ " des journaux de toutes les entités" -#~ " (c'est-à-dire le serveur et les " -#~ "clients) pourrait s'avérer fastidieuse. Notez" -#~ " que dans la simulation Flower, le" -#~ " serveur affiche automatiquement tous les" -#~ " journaux. Vous pouvez toujours spécifier" -#~ " un :code:`HTTPHandler` si vous souhaitez" -#~ " sauvegarder ou analyser les journaux " -#~ "à un autre endroit." - -#~ msgid "Enable SSL connections" -#~ msgstr "Collecte centralisée des données" -#~ msgid "Python version" -#~ msgstr "Version Python" +#~ msgid "" +#~ "`GitHub docs: Creating a pull request" +#~ " `_" +#~ msgstr "" #~ msgid "" -#~ "Flower requires at least `Python 3.7 " -#~ "`_, but `Python 3.8" -#~ " `_ or above is " -#~ "recommended." +#~ "`GitHub docs: Creating a pull request" +#~ " from a fork `_" #~ msgstr "" -#~ "Flower nécessite `Python 3.7 " -#~ "`_ ou plus, nous " -#~ "recommandons `Python 3.8 " -#~ "`_." 
-#~ msgid "Run simulations" -#~ msgstr "Simulation de moniteur" +#~ msgid "Requirements" +#~ msgstr "Changements nécessaires" #~ msgid "" -#~ "Simulating Federated Learning workloads is " -#~ "useful for a multitude of use-" -#~ "cases: you might want to run your" -#~ " workload on a large cohort of " -#~ "clients but without having to source," -#~ " configure and mange a large number" -#~ " of physical devices; you might want" -#~ " to run your FL workloads as " -#~ "fast as possible on the compute " -#~ "systems you have access to without " -#~ "having to go through a complex " -#~ "setup process; you might want to " -#~ "validate your algorithm on different " -#~ "scenarios at varying levels of data " -#~ "and system heterogeneity, client availability," -#~ " privacy budgets, etc. These are " -#~ "among some of the use-cases where" -#~ " simulating FL workloads makes sense. " -#~ "Flower can accommodate these scenarios " -#~ "by means of its `VirtualClientEngine " -#~ "`_ or VCE." +#~ "Contributing a new baseline is really" +#~ " easy. You only have to make " +#~ "sure that your federated learning " +#~ "experiments are running with Flower. As" +#~ " soon as you have created a " +#~ "Flower-based experiment, you can contribute" +#~ " it." #~ msgstr "" #~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_client " -#~ "`_) in the" -#~ " sense that they can be configure " -#~ "by creating a class inheriting, for " -#~ "example, from `flwr.client.NumPyClient `_ and therefore" -#~ " behave in an identical way. In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ "It is recommended (but not required) " +#~ "to use `Hydra `_ to " +#~ "execute the experiment." #~ msgstr "" #~ msgid "" -#~ "resource-aware: this means that each " -#~ "client gets assigned a portion of " -#~ "the compute and memory on your " -#~ "system. You as a user can control" -#~ " this at the beginning of the " -#~ "simulation and allows you to control " -#~ "the degree of parallelism of your " -#~ "Flower FL simulation. The fewer the " -#~ "resources per client, the more clients" -#~ " can run concurrently on the same " -#~ "hardware." +#~ "Please make sure to add your " +#~ "baseline or experiment to the " +#~ "corresponding directory as explained in " +#~ "`Executing Baseline `_. Give your baseline the " +#~ "unique identifier. For example, :code:`fedbn`" +#~ " refers to the paper \"FedBN: " +#~ "Federated Learning on non-IID Features" +#~ " via Local Batch Normalization\" and " +#~ "creates the corresponding directory " +#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn`. Then" +#~ " you create the experiment directory " +#~ "with the experiment name. For example," +#~ " the experiment that measures the " +#~ "convergence has the directory " +#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`." +#~ " This directory contains all your " +#~ "code and a :code:`README.md` with a " +#~ "link to the paper, the paper's " +#~ "abstract, and a detailed description of" +#~ " how to execute the experiments." #~ msgstr "" #~ msgid "" -#~ "self-managed: this means that you " -#~ "as a user do not need to " -#~ "launch clients manually, instead this " -#~ "gets delegated to :code:`VirtualClientEngine`'s " -#~ "internals." 
+#~ "Please also check if :code:`pyproject.toml`" +#~ " and :code:`requirements.txt` (all in the" +#~ " directory `baselines " +#~ "`_ contain" +#~ " all required Python packages (libraries," +#~ " frameworks, ...). If the required " +#~ "Python package is not yet listed, " +#~ "please add it to :code:`pyproject.toml`. " +#~ "If you need a different version of" +#~ " a package already listed, please try" +#~ " to ensure your experiment runs with" +#~ " the existing version listed in " +#~ ":code:`pyproject.toml` (or :code:`requirements.txt`). " +#~ "If that doesn't work, open a " +#~ "GitHub Issue and request the version " +#~ "change." #~ msgstr "" #~ msgid "" -#~ "ephemeral: this means that a client " -#~ "is only materialized when it is " -#~ "required in the FL process (e.g. " -#~ "to do `fit() `_). The object is" -#~ " destroyed afterwards, releasing the " -#~ "resources it was assigned and allowing" -#~ " in this way other clients to " -#~ "participate." +#~ "The experiment also needs to contain " +#~ "a file with a downloader for the" +#~ " dataset - if possible automatic. " +#~ "This can be included in one of " +#~ "the files or as an extra file." #~ msgstr "" #~ msgid "" -#~ "The :code:`VirtualClientEngine` implements `virtual`" -#~ " clients using `Ray `_, " -#~ "an open-source framework for scalable" -#~ " Python workloads. In particular, Flower's" -#~ " :code:`VirtualClientEngine` makes use of " -#~ "`Actors `_ to spawn `virtual` clients" -#~ " and run their workload." +#~ "Finally, please add plots for all " +#~ "experimental results your code is " +#~ "running to the :code:`experiment` directory" +#~ " and include them in :code:`README.md`. " +#~ "Doing this helps others and enables " +#~ "them to recognize your contributions " +#~ "quickly." +#~ msgstr "" + +#~ msgid "" +#~ "We are aware that a few libraries" +#~ " are available only via Conda. " +#~ "However, we want to encourage you " +#~ "to ensure that your code also runs" +#~ " well outside of Conda to make " +#~ "it more accessible to the broader " +#~ "research community." #~ msgstr "" -#~ msgid "Launch your Flower simulation" +#~ msgid "Here is a checklist for adding a new baseline:" #~ msgstr "" #~ msgid "" -#~ "Running Flower simulations still require " -#~ "you to define your client class, a" -#~ " strategy, and utility functions to " -#~ "download and load (and potentially " -#~ "partition) your dataset. With that out" -#~ " of the way, launching your " -#~ "simulation is done with `start_simulation " -#~ "`_ " -#~ "and a minimal example looks as " -#~ "follows:" +#~ "add required Python packages to " +#~ ":code:`pyproject.toml` or :code:`requirements.txt`" #~ msgstr "" -#~ msgid "VirtualClientEngine resources" -#~ msgstr "Moteur de client virtuel" - #~ msgid "" -#~ "By default the VCE has access to" -#~ " all system resources (i.e. all CPUs," -#~ " all GPUs, etc) since that is " -#~ "also the default behavior when starting" -#~ " Ray. However, in some settings you" -#~ " might want to limit how many " -#~ "of your system resources are used " -#~ "for simulation. You can do this " -#~ "via the :code:`ray_init_args` input argument" -#~ " to :code:`start_simulation` which the VCE" -#~ " internally passes to Ray's " -#~ ":code:`ray.init` command. For a complete " -#~ "list of settings you can configure " -#~ "check the `ray.init `_ " -#~ "documentation. Do not set " -#~ ":code:`ray_init_args` if you want the " -#~ "VCE to use all your system's CPUs" -#~ " and GPUs." 
+#~ "add all required code under " +#~ ":code:`baselines/flwr_baselines/publications/[new_publication]`" #~ msgstr "" -#~ msgid "Assigning client resources" +#~ msgid "add a dataset downloader" #~ msgstr "" -#~ msgid "" -#~ "By default the :code:`VirtualClientEngine` " -#~ "assigns a single CPU core (and " -#~ "nothing else) to each virtual client." -#~ " This means that if your system " -#~ "has 10 cores, that many virtual " -#~ "clients can be concurrently running." +#~ msgid "add an experiment plot" #~ msgstr "" -#~ msgid "" -#~ "More often than not, you would " -#~ "probably like to adjust the resources" -#~ " your clients get assigned based on" -#~ " the complexity (i.e. compute and " -#~ "memory footprint) of your FL workload." -#~ " You can do so when starting " -#~ "your simulation by setting the argument" -#~ " `client_resources` to `start_simulation `_. Two " -#~ "keys are internally used by Ray to" -#~ " schedule and spawn workloads (in our" -#~ " case Flower clients):" +#~ msgid "add a :code:`README.md`" #~ msgstr "" -#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#~ msgid "Usability" #~ msgstr "" #~ msgid "" -#~ ":code:`num_gpus` indicates the **ratio** of" -#~ " GPU memory a client gets assigned." +#~ "Flower is known and loved for its" +#~ " usability. Therefore, make sure that " +#~ "your baseline or experiment can be " +#~ "executed with a single command such " +#~ "as :code:`./run.sh` or :code:`python3 " +#~ "main.py`. How you organize the " +#~ "experiments and the related code " +#~ "structure is up to you as an " +#~ "author, but please keep in mind to" +#~ " make sure that other users can " +#~ "easily understand and execute your " +#~ "baseline." #~ msgstr "" -#~ msgid "Let's see a few examples:" +#~ msgid "We look forward to your contribution!" +#~ msgstr "Exemple de première contribution" + +#~ msgid "flwr" +#~ msgstr "Fleur" + +#~ msgid "binaries" #~ msgstr "" +#~ msgid "Flower Baselines" +#~ msgstr "Demande pour une nouvelle Flower Baseline" + #~ msgid "" -#~ "While the :code:`client_resources` can be " -#~ "used to control the degree of " -#~ "concurrency in your FL simulation, this" -#~ " does not stop you from running " -#~ "dozens, hundreds or even thousands of" -#~ " clients in the same round and " -#~ "having orders of magnitude more " -#~ "`dormant` (i.e. not participating in a" -#~ " round) clients. Let's say you want" -#~ " to have 100 clients per round " -#~ "but your system can only accommodate " -#~ "8 clients concurrently. The " -#~ ":code:`VirtualClientEngine` will schedule 100 " -#~ "jobs to run (each simulating a " -#~ "client sampled by the strategy) and " -#~ "then will execute them in a " -#~ "resource-aware manner in batches of " -#~ "8." +#~ "Flower Baselines are a collection of " +#~ "organised scripts used to reproduce " +#~ "results from well-known publications or" +#~ " benchmarks. You can check which " +#~ "baselines already exist and/or contribute " +#~ "your own baseline." #~ msgstr "" -#~ msgid "" -#~ "To understand all the intricate details" -#~ " on how resources are used to " -#~ "schedule FL clients and how to " -#~ "define custom resources, please take a" -#~ " look at the `Ray documentation " -#~ "`_." +#~ msgid "Flower requires `Python 3.7 `_ or above." 
+#~ msgstr "`Python 3.7 `_ ou plus" + +#~ msgid "|9e234df38403464899ad3aee36bf1b95|" #~ msgstr "" -#~ msgid "Simulation examples" -#~ msgstr "Exemples de PyTorch" +#~ msgid "|081158351506446f9f772cb45ee68523|" +#~ msgstr "" -#~ msgid "" -#~ "A few ready-to-run complete " -#~ "examples for Flower simulation in " -#~ "Tensorflow/Keras and PyTorch are provided " -#~ "in the `Flower repository " -#~ "`_. You can run " -#~ "them on Google Colab too:" +#~ msgid "|e9325042b79c45ed96b5a8d2f6f3cdc9|" #~ msgstr "" -#~ msgid "" -#~ "`Tensorflow/Keras Simulation " -#~ "`_: 100 clients collaboratively " -#~ "train a MLP model on MNIST." +#~ msgid "|11b83bb107344db78a37266e080c4a7a|" #~ msgstr "" -#~ "`Quickstart TensorFlow (Code) " -#~ "`_" -#~ msgid "" -#~ "`PyTorch Simulation " -#~ "`_: 100 clients collaboratively train" -#~ " a CNN model on MNIST." +#~ msgid "|cd764bcf6d174a9cb62880ace9a8a6bd|" #~ msgstr "" -#~ "`Quickstart PyTorch (Code) " -#~ "`_" -#~ msgid "" -#~ "Flower's :code:`VirtualClientEngine` allows you " -#~ "to run FL simulations across multiple" -#~ " compute nodes. Before starting your " -#~ "multi-node simulation ensure that you:" +#~ msgid "|5c520984cced41e38f6bb4af416c3f84|" #~ msgstr "" -#~ msgid "Have the same Python environment in all nodes." +#~ msgid "|66941b0608644cf1a2269a194d3bc0dd|" #~ msgstr "" -#~ msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#~ msgid "|4b149f3a095b402bb8890275aabc9298|" #~ msgstr "" -#~ msgid "" -#~ "Have a copy of your dataset in " -#~ "all nodes (more about this in " -#~ ":ref:`simulation considerations `)" +#~ msgid "|675cf7d3d53a4817b5d47529c0758158|" #~ msgstr "" -#~ msgid "" -#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " -#~ "`start_simulation `_ so the " -#~ ":code:`VirtualClientEngine` attaches to a " -#~ "running Ray instance." +#~ msgid "|7ca594e16ae7477790c2e3cf096ec7cd|" #~ msgstr "" -#~ msgid "" -#~ "Start Ray on you head node: on " -#~ "the terminal type :code:`ray start " -#~ "--head`. This command will print a " -#~ "few lines, one of which indicates " -#~ "how to attach other nodes to the" -#~ " head node." +#~ msgid "|d669336577b545a081d5d74169a9bc4d|" #~ msgstr "" -#~ msgid "" -#~ "Attach other nodes to the head " -#~ "node: copy the command shown after " -#~ "starting the head and execute it " -#~ "on terminal of a new node: for " -#~ "example :code:`ray start " -#~ "--address='192.168.1.132:6379'`" +#~ msgid "|00b3d6cde1ff410ba54eff58da4e033a|" #~ msgstr "" -#~ msgid "" -#~ "With all the above done, you can" -#~ " run your code from the head " -#~ "node as you would if the " -#~ "simulation was running on a single " -#~ "node." +#~ msgid "|29a11f5353084c1995c538f7edef71a5|" #~ msgstr "" -#~ msgid "" -#~ "Once your simulation is finished, if " -#~ "you'd like to dismantle your cluster " -#~ "you simply need to run the command" -#~ " :code:`ray stop` in each node's " -#~ "terminal (including the head node)." +#~ msgid "|d62eda312fd44726bb5db2b761fe7e0d|" #~ msgstr "" -#~ msgid "Multi-node simulation good-to-know" +#~ msgid "Using Baselines" #~ msgstr "" -#~ msgid "" -#~ "Here we list a few interesting " -#~ "functionality when running multi-node FL" -#~ " simulations:" +#~ msgid "Structure" #~ msgstr "" #~ msgid "" -#~ "User :code:`ray status` to check all " -#~ "nodes connected to your head node " -#~ "as well as the total resources " -#~ "available to the :code:`VirtualClientEngine`." +#~ "All baselines are available in the " +#~ "directory `baselines " +#~ "`_. 
This " +#~ "directory has two different files:" #~ msgstr "" #~ msgid "" -#~ "When attaching a new node to the" -#~ " head, all its resources (i.e. all" -#~ " CPUs, all GPUs) will be visible " -#~ "by the head node. This means that" -#~ " the :code:`VirtualClientEngine` can schedule " -#~ "as many `virtual` clients as that " -#~ "node can possible run. In some " -#~ "settings you might want to exclude " -#~ "certain resources from the simulation. " -#~ "You can do this by appending " -#~ "`--num-cpus=` and/or `--num-" -#~ "gpus=` in any :code:`ray " -#~ "start` command (including when starting " -#~ "the head)" +#~ "Both files contain all the information" +#~ " about required Python packages (libraries," +#~ " frameworks, ...) and their versions. " +#~ "You can install each library separately" +#~ " by using :code: `pip install` or " +#~ "you can use Poetry and run " +#~ "code:`poetry install` in the directory " +#~ "where you find the :code:`pyproject.toml` " +#~ "file. After installing all requirements, " +#~ "you can start to run your " +#~ "baseline." #~ msgstr "" -#~ msgid "Considerations for simulations" -#~ msgstr "Simulation de moniteur" - #~ msgid "" -#~ "We are actively working on these " -#~ "fronts so to make it trivial to" -#~ " run any FL workload with Flower " -#~ "simulation." +#~ "Go to the baseline that you want" +#~ " to execute. The directories and " +#~ "files are structured so that you " +#~ "can first find the paper with " +#~ "their unique identifier such that, for" +#~ " example, :code:`FedProx` refers to the " +#~ "paper \"Federated Optimization in " +#~ "Heterogeneous Networks\". The :code:`fedprox` " +#~ "section contains all available experiments " +#~ "from that paper." #~ msgstr "" #~ msgid "" -#~ "The current VCE allows you to run" -#~ " Federated Learning workloads in simulation" -#~ " mode whether you are prototyping " -#~ "simple scenarios on your personal laptop" -#~ " or you want to train a complex" -#~ " FL pipeline across multiple high-" -#~ "performance GPU nodes. While we add " -#~ "more capabilities to the VCE, the " -#~ "points below highlight some of the " -#~ "considerations to keep in mind when " -#~ "designing your FL pipeline with Flower." -#~ " We also highlight a couple of " -#~ "current limitations in our implementation." +#~ "The experiment area contains a " +#~ ":code:`README.md` covering the corresponding " +#~ "paper, its abstract, and goal as " +#~ "well as a detailed description of " +#~ "how to run the baseline. Please " +#~ "use the :code:`README.md` to see how " +#~ "to execute each individual baseline." #~ msgstr "" -#~ msgid "GPU resources" -#~ msgstr "Ressources" - -#~ msgid "" -#~ "The VCE assigns a share of GPU " -#~ "memory to a client that specifies " -#~ "the key :code:`num_gpus` in " -#~ ":code:`client_resources`. This being said, Ray" -#~ " (used internally by the VCE) is " -#~ "by default:" +#~ msgid "Available Baselines" #~ msgstr "" #~ msgid "" -#~ "not aware of the total VRAM " -#~ "available on the GPUs. This means " -#~ "that if you set :code:`num_gpus=0.5` and" -#~ " you have two GPUs in your " -#~ "system with different (e.g. 32GB and " -#~ "8GB) VRAM amounts, they both would " -#~ "run 2 clients concurrently." +#~ "The following table lists all currently" +#~ " available baselines and the corresponding" +#~ " papers. If you want to add a" +#~ " new baseline or experiment, please " +#~ "check the `Contributing Baselines " +#~ "`_ section." #~ msgstr "" -#~ msgid "" -#~ "not aware of other unrelated (i.e. 
" -#~ "not created by the VCE) workloads " -#~ "are running on the GPU. Two " -#~ "takeaways from this are:" +#~ msgid "Paper" #~ msgstr "" -#~ msgid "" -#~ "Your Flower server might need a " -#~ "GPU to evaluate the `global model` " -#~ "after aggregation (by instance when " -#~ "making use of the `evaluate method " -#~ "`_)" +#~ msgid "Experiment" #~ msgstr "" -#~ msgid "" -#~ "If you want to run several " -#~ "independent Flower simulations on the " -#~ "same machine you need to mask-out" -#~ " your GPUs with " -#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" -#~ " your experiment." +#~ msgid "Directory" #~ msgstr "" -#~ msgid "" -#~ "In addition, the GPU resource limits " -#~ "passed to :code:`client_resources` are not " -#~ "`enforced` (i.e. they can be exceeded)" -#~ " which can result in the situation" -#~ " of client using more VRAM than " -#~ "the ratio specified when starting the" -#~ " simulation." +#~ msgid "`FedAvg `_" #~ msgstr "" -#~ msgid "TensorFlow with GPUs" -#~ msgstr "Exemples de TensorFlow" +#~ msgid "MNIST" +#~ msgstr "" -#~ msgid "" -#~ "When `using a GPU with TensorFlow " -#~ "`_ nearly your " -#~ "entire GPU memory of all your GPUs" -#~ " visible to the process will be " -#~ "mapped. This is done by TensorFlow " -#~ "for optimization purposes. However, in " -#~ "settings such as FL simulations where" -#~ " we want to split the GPU into" -#~ " multiple `virtual` clients, this is " -#~ "not a desirable mechanism. Luckily we" -#~ " can disable this default behavior by" -#~ " `enabling memory growth " -#~ "`_." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedavg_mnist/`" #~ msgstr "" -#~ msgid "" -#~ "This would need to be done in " -#~ "the main process (which is where " -#~ "the server would run) and in each" -#~ " Actor created by the VCE. By " -#~ "means of :code:`actor_kwargs` we can " -#~ "pass the reserved key `\"on_actor_init_fn\"`" -#~ " in order to specify a function " -#~ "to be executed upon actor " -#~ "initialization. In this case, to enable" -#~ " GPU growth for TF workloads. It " -#~ "would look as follows:" +#~ msgid "`FedProx `_" #~ msgstr "" -#~ msgid "" -#~ "This is precisely the mechanism used " -#~ "in `Tensorflow/Keras Simulation " -#~ "`_ example." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedprox_mnist/`" #~ msgstr "" -#~ "`Quickstart TensorFlow (Code) " -#~ "`_" -#~ msgid "Multi-node setups" +#~ msgid "`FedOpt `_" #~ msgstr "" -#~ msgid "" -#~ "The VCE does not currently offer a" -#~ " way to control on which node a" -#~ " particular `virtual` client is executed." -#~ " In other words, if more than a" -#~ " single node have the resources " -#~ "needed by a client to run, then" -#~ " any of those nodes could get " -#~ "the client workload scheduled onto. " -#~ "Later in the FL process (i.e. in" -#~ " a different round) the same client" -#~ " could be executed by a different " -#~ "node. Depending on how your clients " -#~ "access their datasets, this might " -#~ "require either having a copy of " -#~ "all dataset partitions on all nodes " -#~ "or a dataset serving mechanism (e.g. " -#~ "using nfs, a database) to circumvent " -#~ "data duplication." +#~ msgid "sparse gradient task" #~ msgstr "" -#~ msgid "" -#~ "By definition virtual clients are " -#~ "`stateless` due to their ephemeral " -#~ "nature. A client state can be " -#~ "implemented as part of the Flower " -#~ "client class but users need to " -#~ "ensure this saved to persistent storage" -#~ " (e.g. 
a database, disk) and that " -#~ "can be retrieve later by the same" -#~ " client regardless on which node it" -#~ " is running from. This is related " -#~ "to the point above also since, in" -#~ " some way, the client's dataset could" -#~ " be seen as a type of `state`." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/adaptive_federated_optimization`" #~ msgstr "" -#~ msgid "Save and load model checkpoints" -#~ msgstr "Sauvegarde et chargement des points de contrôle PyTorch" +#~ msgid "`FedBN `_" +#~ msgstr "" -#~ msgid "" -#~ "Flower does not automatically save model" -#~ " updates on the server-side. This " -#~ "how-to guide describes the steps " -#~ "to save (and load) model checkpoints " -#~ "in Flower." +#~ msgid "convergence rate" #~ msgstr "" -#~ msgid "Legacy example guides" +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`" #~ msgstr "" -#~ msgid "Contributor tutorials" -#~ msgstr "Configuration du contributeur" +#~ msgid "" +#~ "Flower requires `Python 3.7 " +#~ "`_ or above, we " +#~ "recommend `Python 3.8 " +#~ "`_." +#~ msgstr "" +#~ "Flower nécessite `Python 3.7 " +#~ "`_ ou plus, nous " +#~ "recommandons `Python 3.8 " +#~ "`_." -#~ msgid "Contributor explanations" -#~ msgstr "Explications" +#~ msgid "|6baade94cd14454e82ead34fcc29a182|" +#~ msgstr "" -#~ msgid "Flower Framework Documentation" -#~ msgstr "Documentation de Flower" +#~ msgid "|1209ecd819104c458d396cf665c7ed4f|" +#~ msgstr "" -#~ msgid "PyTorch" -#~ msgstr "Exemples de PyTorch" +#~ msgid "|c088b02349304344a53f3ce1464225fb|" +#~ msgstr "" -#~ msgid "TensorFlow" -#~ msgstr "TensorFlow" +#~ msgid "|b54d50afc82a4a57a55997a9eaeb735b|" +#~ msgstr "" -#~ msgid "Flower CLI reference" -#~ msgstr "Client de Flower" +#~ msgid "|d17b57e97b714a25b43790d4b832fd87|" +#~ msgstr "" -#~ msgid "flwr (Python API reference)" -#~ msgstr "Référence pour l'API" +#~ msgid "|38966d05301a4854aa73c8c5033bfaab|" +#~ msgstr "" -#~ msgid "Unreleased" -#~ msgstr "Inédit" +#~ msgid "|231d55f7926d4a5db02dcd724ec62529|" +#~ msgstr "" -#~ msgid "**Deprecate Python 3.7**" -#~ msgstr "**Deprecate Python 3.7**" +#~ msgid "|fb44f2e13a1b4b69b7a72234eedd13f4|" +#~ msgstr "" -#~ msgid "" -#~ "Since Python 3.7 reached its end " -#~ "of life (EOL) on 2023-06-27, support " -#~ "for Python 3.7 is now deprecated " -#~ "and will be removed in an upcoming" -#~ " release." +#~ msgid "|1cfc77af5d164030942e84d14268c256|" #~ msgstr "" -#~ "Étant donné que Python 3.7 a " -#~ "atteint sa fin de vie (EOL) le " -#~ "2023-06-27, la prise en charge de " -#~ "Python 3.7 est désormais dépréciée et" -#~ " sera supprimée dans une prochaine " -#~ "version." 
-#~ msgid "" -#~ "**Add new** `FedTrimmedAvg` **strategy** " -#~ "([#1769](https://github.com/adap/flower/pull/1769), " -#~ "[#1853](https://github.com/adap/flower/pull/1853))" +#~ msgid "|0d50828231a64bc08223544a2d2fa216|" #~ msgstr "" -#~ "**Ajouter un nouveau** `FedTrimmedAvg` " -#~ "**stratégie** " -#~ "([#1769](https://github.com/adap/flower/pull/1769), " -#~ "[#1853](https://github.com/adap/flower/pull/1853))" -#~ msgid "" -#~ "The new `FedTrimmedAvg` strategy implements" -#~ " Trimmed Mean by [Dong Yin, " -#~ "2018](https://arxiv.org/abs/1803.01498)" +#~ msgid "|904387757ceb42fbaa1875f3e8061113|" +#~ msgstr "" + +#~ msgid "|68608e1b7c4842458c528b431c715f5a|" #~ msgstr "" -#~ "La nouvelle stratégie `FedTrimmedAvg` met " -#~ "en œuvre la moyenne trimmée par " -#~ "[Dong Yin, 2018](https://arxiv.org/abs/1803.01498)" -#~ msgid "" -#~ "**Add parameter aggregation to** `mt-" -#~ "pytorch` **code example** " -#~ "([#1785](https://github.com/adap/flower/pull/1785))" +#~ msgid "|2adb106bda97480bb4b33eac472e321e|" #~ msgstr "" -#~ "**Ajouter l'agrégation des paramètres à** " -#~ "`mt-pytorch` **exemple de code** " -#~ "([#1785](https://github.com/adap/flower/pull/1785))" -#~ msgid "" -#~ "The `mt-pytorch` example shows how " -#~ "to aggregate parameters when writing a" -#~ " driver script. The included `driver.py`" -#~ " and `server.py` have been aligned to" -#~ " demonstrate both the low-level way" -#~ " and the high-level way of " -#~ "building server-side logic." +#~ msgid "|025f0a6f7a6145cba4bf8fa0e2495851|" #~ msgstr "" -#~ "L'exemple `mt-pytorch` montre comment " -#~ "agréger des paramètres lors de " -#~ "l'écriture d'un script de pilote. Les" -#~ " fichiers `driver.py` et `server.py` inclus" -#~ " ont été alignés pour démontrer à " -#~ "la fois la manière de bas niveau" -#~ " et la manière de haut niveau " -#~ "de construire la logique côté serveur." + +#~ msgid "Before the release" +#~ msgstr "Avant la sortie" #~ msgid "" -#~ "**Introduce (experimental) gRPC request-" -#~ "response API** " -#~ "([#1867](https://github.com/adap/flower/pull/1867), " -#~ "[#1901](https://github.com/adap/flower/pull/1901))" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" #~ msgstr "" -#~ "**Introduire l'API demande-réponse gRPC " -#~ "(expérimentale)** " -#~ "([#1867](https://github.com/adap/flower/pull/1867), " -#~ "[#1901](https://github.com/adap/flower/pull/1901))" +#~ "Mettez à jour le journal des " +#~ "modifications (``changelog.md``) avec tous les" +#~ " changements pertinents qui se sont " +#~ "produits après la dernière version. Si" +#~ " la dernière version a été étiquetée" +#~ " ``v1.2.0``, vous pouvez utiliser l'URL " +#~ "suivante pour voir tous les commits " +#~ "qui ont été fusionnés dans ``main`` " +#~ "depuis lors :" #~ msgid "" -#~ "In addition to the existing gRPC " -#~ "API (based on bidirectional streaming) " -#~ "and the experimental REST API, there " -#~ "is now a new gRPC API that " -#~ "uses a request-response model to " -#~ "communicate with client nodes." 
+#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" #~ msgstr "" -#~ "En plus de l'API gRPC existante " -#~ "(basée sur un flux bidirectionnel) et" -#~ " de l'API REST expérimentale, il " -#~ "existe désormais une nouvelle API gRPC" -#~ " qui utilise un modèle demande-" -#~ "réponse pour communiquer avec les nœuds" -#~ " clients." +#~ "`GitHub : Compare v1.2.0...main " +#~ "`_" #~ msgid "" -#~ "Please note: The gRPC request-response" -#~ " API is still experimental and will" -#~ " likely change significantly over time." +#~ "Thank the authors who contributed since" +#~ " the last release. This command helps" +#~ " extract them: ``git log --format='%aN' " +#~ "v1.1.0..HEAD | sort -u``. The command" +#~ " has the same order as ``git " +#~ "shortlog``." #~ msgstr "" -#~ "Remarque : l'API requête-réponse gRPC" -#~ " est encore expérimentale et est " -#~ "susceptible de changer de manière " -#~ "significative au fil du temps." +#~ "Remerciez les auteurs qui ont contribué" +#~ " depuis la dernière version. Cette " +#~ "commande permet de les extraire : " +#~ "``git log --format='%aN' v1.1.0..HEAD | " +#~ "sort -u``. La commande a le même" +#~ " ordre que ``git shortlog``." #~ msgid "" -#~ "**Replace the eperimental** " -#~ "`start_client(rest=True)` **with the new** " -#~ "`start_client(transport=\"rest\")` " -#~ "([#1880](https://github.com/adap/flower/pull/1880))" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." #~ msgstr "" -#~ "**Remplacez le fichier expérimental** " -#~ "`start_client(rest=True) **par le nouveau** " -#~ "`start_client(transport=\"rest\")` " -#~ "([#1880](https://github.com/adap/flower/pull/1880))" +#~ "Mettez à jour l'en-tête de section" +#~ " ``changelog.md`` ``Unreleased`` pour qu'il " +#~ "contienne le numéro de version et " +#~ "la date de la version que vous " +#~ "construisez. Créez une demande de " +#~ "traction avec le changement." #~ msgid "" -#~ "The (experimental) `start_client` argument " -#~ "`rest` was deprecated in favor of " -#~ "a new argument `transport`. " -#~ "`start_client(transport=\"rest\")` will yield the" -#~ " same behaviour as `start_client(rest=True)` " -#~ "did before. All code should migrate " -#~ "to the new argument `transport`. The " -#~ "deprecated argument `rest` will be " -#~ "removed in a future release." +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``" #~ msgstr "" +#~ "Marquez le commit de la version " +#~ "avec le numéro de version dès que" +#~ " le PR est fusionné : ``git tag" +#~ " v0.12.3``, puis ``git push --tags``" #~ msgid "" -#~ "**Migrate experimental REST API to " -#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" +#~ "Build the release with ``./dev/build.sh``, " +#~ "then publish it with ``./dev/publish.sh``" #~ msgstr "" -#~ "**Migrer l'API REST expérimentale vers " -#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" +#~ "Construisez la version avec " +#~ "``./dev/build.sh``, puis publiez-la avec " +#~ "``./dev/publish.sh``" #~ msgid "" -#~ "The (experimental) REST API used to " -#~ "be implemented in " -#~ "[FastAPI](https://fastapi.tiangolo.com/), but it has" -#~ " now been migrated to use " -#~ "[Starlette](https://www.starlette.io/) directly." 
+#~ "Create an entry in GitHub releases " +#~ "with the release notes for the " +#~ "previously tagged commit and attach the" +#~ " build artifacts (:code:`.whl` and " +#~ ":code:`.tar.gz`)." #~ msgstr "" -#~ "L'API REST (expérimentale) était auparavant" -#~ " implémentée dans " -#~ "[FastAPI](https://fastapi.tiangolo.com/), mais elle " -#~ "a maintenant été migrée pour utiliser" -#~ " directement [Starlette](https://www.starlette.io/)." +#~ "Crée une entrée dans GitHub releases " +#~ "avec les notes de version pour le" +#~ " commit précédemment étiqueté et attache" +#~ " les artefacts de construction " +#~ "(:code:`.whl` et :code:`.tar.gz`)." #~ msgid "" -#~ "**Add a new gRPC option** " -#~ "([#2197](https://github.com/adap/flower/pull/2197))" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" #~ msgstr "" -#~ "**Ajouter une nouvelle option gRPC** " -#~ "([#2197](https://github.com/adap/flower/pull/2197))" +#~ "Deuxièmement, créer un environnement virtuel" +#~ " (et l'activer). Si vous choisissez " +#~ "d'utiliser :code:`pyenv` (avec le plugin " +#~ ":code:`pyenv-virtualenv`) et que vous " +#~ "l'avez déjà installé, vous pouvez " +#~ "utiliser le script suivant (par défaut" +#~ " il utilisera :code:`Python 3.8.17`, mais" +#~ " vous pouvez le changer en " +#~ "fournissant une :code:`` spécifique)::" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "serveur.stratégie.FedAvg" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "stratégie.serveur.FedAvgM" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "serveur.stratégie.FedOpt" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "serveur.stratégie.FedProx" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "serveur.stratégie.FedAdagrad" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "serveur.stratégie.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "serveur.stratégie.FedYogi" #~ msgid "" -#~ "We now start a gRPC server with" -#~ " the `grpc.keepalive_permit_without_calls` option " -#~ "set to 0 by default. This prevents" -#~ " the clients from sending keepalive " -#~ "pings when there is no outstanding " -#~ "stream." +#~ "`achiverram28`, `Adam Narozniak`, `Anass " +#~ "Anhari`, `Charles Beauville`, `Dana-Farber`," +#~ " `Daniel J. Beutel`, `Daniel Nata " +#~ "Nugraha`, `Edoardo Gabrielli`, `eunchung`, " +#~ "`Gustavo Bertoli`, `Heng Pan`, `Javier`, " +#~ "`Mahdi`, `Ruth Galindo`, `Steven Hé " +#~ "(Sīchàng)`, `Taner Topal`" #~ msgstr "" -#~ "Nous démarrons maintenant un serveur " -#~ "gRPC avec l'option " -#~ "`grpc.keepalive_permit_without_calls` réglée sur 0" -#~ " par défaut, ce qui empêche les " -#~ "clients d'envoyer des pings de maintien" -#~ " lorsqu'il n'y a pas de flux en" -#~ " attente." 
#~ msgid "" -#~ "**General improvements** " -#~ "([#1872](https://github.com/adap/flower/pull/1872), " -#~ "[#1866](https://github.com/adap/flower/pull/1866), " -#~ "[#1884](https://github.com/adap/flower/pull/1884))" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" #~ msgstr "" -#~ "**Mettre à jour les exemples de " -#~ "code** ([#1291](https://github.com/adap/flower/pull/1291), " -#~ "[#1286](https://github.com/adap/flower/pull/1286), " -#~ "[#1282](https://github.com/adap/flower/pull/1282))" - -#~ msgid "Example projects" -#~ msgstr "Exemples" +#~ "Chargeons maintenant l'ensemble de formation" +#~ " et de test CIFAR-10, partitionnons-" +#~ "les en dix ensembles de données " +#~ "plus petits (chacun divisé en ensemble" +#~ " de formation et de validation), et" +#~ " enveloppons les partitions résultantes en" +#~ " créant un PyTorch ``DataLoader`` pour " +#~ "chacun d'entre eux :" #~ msgid "" -#~ "`Flower simulation PyTorch " -#~ "`_" -#~ msgstr "" -#~ "`Flower Quickstart (TensorFlow/Keras) " -#~ "`_" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " horizontal en utilisant XGBoost et " +#~ "Flower !" #~ msgid "" -#~ "`Android Kotlin example " -#~ "`_" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." -#~ msgid "`Android Java example `_" +#~ msgid "|3ff4c820a01d4a5abb022617de537c54|" #~ msgstr "" -#~ msgid "Build a strategy from scratch" -#~ msgstr "Élaborer une stratégie à partir de zéro" - -#~ msgid "Customize the client" -#~ msgstr "Création du client IMDBC" - -#~ msgid "Get started with Flower" +#~ msgid "|7f1889391ad448e2a65920165f0d798c|" #~ msgstr "" -#~ msgid "Quickstart Android" -#~ msgstr "Démarrage rapide d'Android" +#~ msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" +#~ msgstr "" -#~ msgid "" -#~ "Let's build a federated learning system" -#~ " using TFLite and Flower on Android!" +#~ msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant TFLite et Flower sur" -#~ " Android !" -#~ msgid "" -#~ "Please refer to the `full code " -#~ "example " -#~ "`_ to" -#~ " learn more." +#~ msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" #~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet " -#~ "`_ " -#~ "pour en savoir plus." -#~ msgid "Quickstart iOS" -#~ msgstr "Démarrage rapide iOS" +#~ msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" +#~ msgstr "" -#~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a Neural Network on " -#~ "MNIST using Flower and CoreML on " -#~ "iOS devices." +#~ msgid "|5c62032f589a457bb37b5fee5b2adbde|" #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à former un réseau neuronal sur " -#~ "MNIST en utilisant Flower et CoreML " -#~ "sur les appareils iOS." -#~ msgid "" -#~ "First of all, for running the " -#~ "Flower Python server, it is recommended" -#~ " to create a virtual environment and" -#~ " run everything within a `virtualenv " -#~ "`_. " -#~ "For the Flower client implementation in" -#~ " iOS, it is recommended to use " -#~ "Xcode as our IDE." 
+#~ msgid "|f154df1846dd44f79a94f1dc3ae8b088|" #~ msgstr "" -#~ "Tout d'abord, pour l'exécution du " -#~ "serveur Flower Python, il est recommandé" -#~ " de créer un environnement virtuel et" -#~ " de tout exécuter au sein d'un " -#~ "`virtualenv `_. Pour l'implémentation du client" -#~ " Flower dans iOS, il est recommandé" -#~ " d'utiliser Xcode comme notre IDE." -#~ msgid "" -#~ "Our example consists of one Python " -#~ "*server* and two iPhone *clients* that" -#~ " all have the same model." +#~ msgid "|9d20be8160f7451fb0f33b194506503f|" #~ msgstr "" -#~ "Notre exemple se compose d'un *serveur*" -#~ " Python et de deux *clients* iPhone" -#~ " qui ont tous le même modèle." -#~ msgid "" -#~ "*Clients* are responsible for generating " -#~ "individual weight updates for the model" -#~ " based on their local datasets. These" -#~ " updates are then sent to the " -#~ "*server* which will aggregate them to" -#~ " produce a better model. Finally, the" -#~ " *server* sends this improved version " -#~ "of the model back to each " -#~ "*client*. A complete cycle of weight " -#~ "updates is called a *round*." +#~ msgid "|3d949f76988443c59990d2e64f05c386|" #~ msgstr "" -#~ "*Les clients* sont chargés de générer" -#~ " des mises à jour de poids " -#~ "individuelles pour le modèle en fonction" -#~ " de leurs ensembles de données " -#~ "locaux. Ces mises à jour sont " -#~ "ensuite envoyées au *serveur* qui les" -#~ " agrège pour produire un meilleur " -#~ "modèle. Enfin, le *serveur* renvoie " -#~ "cette version améliorée du modèle à " -#~ "chaque *client*. Un cycle complet de " -#~ "mises à jour de poids s'appelle un" -#~ " *round*." -#~ msgid "" -#~ "Now that we have a rough idea " -#~ "of what is going on, let's get " -#~ "started to setup our Flower server " -#~ "environment. We first need to install" -#~ " Flower. You can do this by " -#~ "using pip:" +#~ msgid "|526c6d9140f6404f8a226d9056327b3b|" #~ msgstr "" -#~ "Maintenant que nous avons une idée " -#~ "approximative de ce qui se passe, " -#~ "commençons à configurer notre environnement" -#~ " de serveur Flower. Nous devons " -#~ "d'abord installer Flower, ce que tu " -#~ "peux faire à l'aide de pip :" -#~ msgid "Or Poetry:" -#~ msgstr "Ou de la poésie :" +#~ msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" +#~ msgstr "" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training using CoreML " -#~ "as our local training pipeline and " -#~ "MNIST as our dataset. For simplicity " -#~ "reasons we will use the complete " -#~ "Flower client with CoreML, that has " -#~ "been implemented and stored inside the" -#~ " Swift SDK. The client implementation " -#~ "can be seen below:" +#~ msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" #~ msgstr "" -#~ "Maintenant que toutes nos dépendances " -#~ "sont installées, exécutons une simple " -#~ "formation distribuée en utilisant CoreML " -#~ "comme pipeline de formation local et " -#~ "MNIST comme ensemble de données. Pour" -#~ " des raisons de simplicité, nous " -#~ "utiliserons le client Flower complet " -#~ "avec CoreML, qui a été mis en " -#~ "œuvre et stocké à l'intérieur du " -#~ "SDK Swift. La mise en œuvre du " -#~ "client peut être vue ci-dessous :" -#~ msgid "" -#~ "Let's create a new application project" -#~ " in Xcode and add :code:`flwr` as " -#~ "a dependency in your project. For " -#~ "our application, we will store the " -#~ "logic of our app in " -#~ ":code:`FLiOSModel.swift` and the UI elements" -#~ " in :code:`ContentView.swift`. 
We will " -#~ "focus more on :code:`FLiOSModel.swift` in " -#~ "this quickstart. Please refer to the " -#~ "`full code example " -#~ "`_ to " -#~ "learn more about the app." +#~ msgid "|c76452ae1ed84965be7ef23c72b95845|" #~ msgstr "" -#~ "Créons un nouveau projet d'application " -#~ "dans Xcode et ajoutons :code:`flwr` " -#~ "comme dépendance dans ton projet. Pour" -#~ " notre application, nous stockerons la " -#~ "logique de notre application dans " -#~ ":code:`FLiOSModel.swift` et les éléments de" -#~ " l'interface utilisateur dans " -#~ ":code:`ContentView.swift`.Nous nous concentrerons " -#~ "davantage sur :code:`FLiOSModel.swift` dans ce" -#~ " quickstart. N'hésite pas à te " -#~ "référer à l'`exemple de code complet " -#~ "`_ pour" -#~ " en savoir plus sur l'application." -#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." #~ msgstr "" -#~ "Importe les paquets liés à Flower " -#~ "et CoreML dans :code:`FLiOSModel.swift` :" #~ msgid "" -#~ "Then add the mlmodel to the " -#~ "project simply by drag-and-drop, " -#~ "the mlmodel will be bundled inside " -#~ "the application during deployment to " -#~ "your iOS device. We need to pass" -#~ " the url to access mlmodel and " -#~ "run CoreML machine learning processes, " -#~ "it can be retrieved by calling the" -#~ " function :code:`Bundle.main.url`. For the " -#~ "MNIST dataset, we need to preprocess " -#~ "it into :code:`MLBatchProvider` object. The" -#~ " preprocessing is done inside " -#~ ":code:`DataLoader.swift`." +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." #~ msgstr "" -#~ "Ensuite, ajoute le mlmodel au projet " -#~ "simplement par glisser-déposer, le " -#~ "mlmodel sera regroupé à l'intérieur de" -#~ " l'application lors du déploiement sur " -#~ "ton appareil iOS. Nous devons passer " -#~ "l'url pour accéder au mlmodel et " -#~ "exécuter les processus d'apprentissage " -#~ "automatique CoreML, elle peut être " -#~ "récupérée en appelant la fonction " -#~ ":code:`Bundle.main.url`. Pour l'ensemble de " -#~ "données MNIST, nous devons le prétraiter" -#~ " dans l'objet :code:`MLBatchProvider`. Le " -#~ "prétraitement est effectué à l'intérieur " -#~ "de :code:`DataLoader.swift`." #~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this informations beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." 
+#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." #~ msgstr "" #~ msgid "" -#~ "After we have all of the necessary" -#~ " informations, let's create our Flower " -#~ "client." +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." #~ msgstr "" +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "Exemple : PyTorch et MNIST" + #~ msgid "" -#~ "Then start the Flower gRPC client " -#~ "and start communicating to the server" -#~ " by passing our Flower client to " -#~ "the function :code:`startFlwrGRPC`." +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." #~ msgstr "" -#~ "Lance ensuite le client Flower gRPC " -#~ "et commence à communiquer avec le " -#~ "serveur en passant notre client Flower" -#~ " à la fonction :code:`startFlwrGRPC`." +#~ "Dans ce tutoriel, nous allons apprendre," +#~ " comment former un réseau neuronal " +#~ "convolutif sur MNIST en utilisant Flower" +#~ " et PyTorch." #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ "call the provided :code:`MLFlwrClient` and " -#~ "call :code:`startFlwrGRPC()`. The attribute " -#~ ":code:`hostname` and :code:`port` tells the" -#~ " client which server to connect to." -#~ " This can be done by entering " -#~ "the hostname and port in the " -#~ "application before clicking the start " -#~ "button to start the federated learning" -#~ " process." +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" -#~ "C'est tout pour le client. Il nous" -#~ " suffit d'implémenter :code:`Client` ou " -#~ "d'appeler le :code:`MLFlwrClient` fourni et" -#~ " d'appeler :code:`startFlwrGRPC()`. L'attribut " -#~ ":code:`hostname` et :code:`port` indique au" -#~ " client à quel serveur se connecter." -#~ " Pour ce faire, il suffit d'entrer" -#~ " le nom d'hôte et le port dans" -#~ " l'application avant de cliquer sur " -#~ "le bouton de démarrage pour lancer " -#~ "le processus d'apprentissage fédéré." +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, installons PyTorch et la" +#~ " bibliothèque **torchvision** :" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "Prêts... prêts... entraînez-vous !" #~ msgid "" -#~ "Once the server is running we can" -#~ " start the clients in different " -#~ "terminals. Build and run the client " -#~ "through your Xcode, one through Xcode" -#~ " Simulator and the other by deploying" -#~ " it to your iPhone. To see more" -#~ " about how to deploy your app " -#~ "to iPhone or Simulator visit `here " -#~ "`_." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. 
Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." #~ msgstr "" -#~ "Une fois que le serveur fonctionne, " -#~ "nous pouvons démarrer les clients dans" -#~ " différents terminaux. Construis et exécute" -#~ " le client grâce à ton Xcode, " -#~ "l'un via le simulateur Xcode et " -#~ "l'autre en le déployant sur ton " -#~ "iPhone. Pour en savoir plus sur la" -#~ " façon de déployer ton application " -#~ "sur l'iPhone ou le simulateur, visite" -#~ " `ici `_." +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons un " +#~ "simple entraînement distribué avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " d'entraînement et l'architecture de notre" +#~ " réseau sont basées sur l'exemple " +#~ "MNIST de base de PyTorch " +#~ "`_. Cela" +#~ " te permettra de voir à quel " +#~ "point il est facile d'envelopper ton " +#~ "code avec Flower et de commencer " +#~ "l'entraînement de manière fédérée. Nous " +#~ "te fournissons deux scripts d'aide, à" +#~ " savoir *run-server.sh*, et *run-" +#~ "clients.sh*. N'aie pas peur de regarder" +#~ " à l'intérieur, ils sont assez " +#~ "simples =)." #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system in your ios device. The " -#~ "full `source code " -#~ "`_ for" -#~ " this example can be found in " -#~ ":code:`examples/ios`." -#~ msgstr "" -#~ "Félicitations ! Tu as réussi à " -#~ "construire et à faire fonctionner ton" -#~ " premier système d'apprentissage fédéré " -#~ "dans ton appareil ios. Le `code " -#~ "source complet " -#~ "`_ de " -#~ "cet exemple se trouve dans " -#~ ":code:`examples/ios`." +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." #~ msgid "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ and join " -#~ "the open-source Flower community on " -#~ "Slack to connect, ask questions, and " -#~ "get help: `Join Slack `__ 🌼 We'd love to hear" -#~ " from you in the ``#introductions`` " -#~ "channel! And if anything is unclear, " -#~ "head over to the ``#questions`` channel." +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." #~ msgstr "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ et rejoignez" -#~ " la communauté open-source Flower sur" -#~ " Slack pour vous connecter, poser des" -#~ " questions et obtenir de l'aide : " -#~ "`Join Slack `__ " -#~ "🌼 Nous serions ravis d'avoir de " -#~ "vos nouvelles dans le canal " -#~ "``#introductions`` ! Et si quelque chose" -#~ " n'est pas clair, dirigez-vous vers" -#~ " le canal ``#questions``." +#~ "Et voilà ! Tu devrais voir la " +#~ "procédure d'entraînement et, après quelques" +#~ " itérations, la précision du test " +#~ "pour chaque client." -#~ msgid "|bd48315a61c14495babefe3c7918b493|" -#~ msgstr "" +#~ msgid "Now, let's see what is really happening inside." 
+#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." -#~ msgid "|c00d9e5b0d324d96b86da8a78b05b14b|" +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" #~ msgstr "" +#~ "Dans le script d'aide au serveur " +#~ "*run-server.sh*, tu trouveras le code " +#~ "suivant qui exécute le fichier " +#~ ":code:`server.py`" -#~ msgid "|faae2ee10f4149c9907563c4f48ec6ea|" +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." #~ msgstr "" +#~ "Nous pouvons aller un peu plus " +#~ "loin et voir que :code:`server.py` lance" +#~ " simplement un serveur qui coordonnera " +#~ "trois tours de formation. Flower Les " +#~ "serveurs sont très personnalisables, mais " +#~ "pour les charges de travail simples, " +#~ "nous pouvons démarrer un serveur à " +#~ "l'aide de la fonction :ref:`start_server " +#~ "` et " +#~ "laisser toutes les possibilités de " +#~ "configuration à leurs valeurs par " +#~ "défaut, comme on peut le voir " +#~ "ci-dessous." -#~ msgid "|13a655510351455292f145a61d6c15d6|" +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." #~ msgstr "" +#~ "Ensuite, jetons un coup d'œil au " +#~ "fichier *run-clients.sh*. Tu verras " +#~ "qu'il contient la boucle principale qui" +#~ " démarre un ensemble de *clients*." -#~ msgid "|13949884182846e3a91433190a936ba9|" +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." #~ msgstr "" +#~ "**cid** : c'est l'identifiant du client." +#~ " C'est un nombre entier qui identifie" +#~ " de façon unique l'identifiant du " +#~ "client." -#~ msgid "|9bf26cc650b146e88b4745df040ece37|" -#~ msgstr "" +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." -#~ msgid "|1590915480fc41708bd43e48af9582f9|" +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." #~ msgstr "" +#~ "**Cette information n'est pas requise " +#~ "par le client, mais elle nous aide" +#~ " à partitionner l'ensemble de données " +#~ "MNIST original pour nous assurer que " +#~ "chaque client travaille sur des sous-" +#~ "ensembles uniques des ensembles *formation*" +#~ " et *test*." -#~ msgid "|e5ee96d702b64256b97b8ca99db10787|" +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. 
This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." #~ msgstr "" +#~ "Encore une fois, nous pouvons aller " +#~ "plus loin et regarder dans " +#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" +#~ " avoir parcouru le code d'analyse des" +#~ " arguments au début de notre fonction" +#~ " :code:`main`, tu trouveras un appel " +#~ "à :code:`mnist.load_data`. Cette fonction est" +#~ " responsable du partitionnement des " +#~ "ensembles de données MNIST originaux " +#~ "(*training* et *test*) et renvoie un " +#~ ":code:`torch.utils.data.DataLoader` s pour chacun" +#~ " d'entre eux. Nous instancions ensuite " +#~ "un objet :code:`PytorchMNISTClient` avec notre" +#~ " ID client, nos DataLoaders, le " +#~ "nombre d'époques dans chaque tour et " +#~ "le périphérique que nous voulons " +#~ "utiliser pour l'entraînement (CPU ou " +#~ "GPU)." -#~ msgid "|84840b244edd47c481278ce534c126cd|" +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." #~ msgstr "" +#~ "L'objet :code:`PytorchMNISTClient` est finalement" +#~ " transmis à :code:`fl.client.start_client` avec" +#~ " l'adresse du serveur lorsque le " +#~ "processus de formation commence." -#~ msgid "|f33f5ebb3a844a2ba54bb6be3571b172|" -#~ msgstr "" +#~ msgid "A Closer Look" +#~ msgstr "Regarder de plus près" -#~ msgid "|5645db4ba9c945518d51ff234f35c797|" +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" #~ msgstr "" +#~ "Maintenant, examinons de près le " +#~ ":code:`PytorchMNISTClient` à l'intérieur du " +#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " +#~ "voyons ce qu'il fait :" -#~ msgid "|317af8d28fcc479ab981047d058c4751|" +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" #~ msgstr "" +#~ "La première chose à remarquer est " +#~ "que :code:`PytorchMNISTClient` instancie un " +#~ "modèle CNN dans son constructeur" -#~ msgid "|8bfd0e697a494d5385662debafade6bf|" +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." #~ msgstr "" +#~ "Le code du CNN est disponible sous" +#~ " :code:`quickstart-pytorch.mnist` et il est" +#~ " reproduit ci-dessous. Il s'agit du" +#~ " même réseau que celui que l'on " +#~ "trouve dans `Exemple basique de MNIST" +#~ " `_." #~ msgid "" -#~ "Differential privacy (DP) is often " -#~ "mentioned in the context of Federated" -#~ " Learning. It is a privacy-preserving" -#~ " method used when analyzing and " -#~ "sharing statistical data, ensuring the " -#~ "privacy of individual participants. DP " -#~ "achieves this by adding statistical " -#~ "noise to the model updates, ensuring " -#~ "any individual participants’ information " -#~ "cannot be distinguished or re-" -#~ "identified. 
This technique can be " -#~ "considered an optimization that provides " -#~ "a quantifiable privacy protection measure." +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" #~ msgstr "" -#~ "La confidentialité différentielle (DP) est " -#~ "souvent mentionnée dans le contexte de" -#~ " l'apprentissage fédéré. Il s'agit d'une" -#~ " méthode de préservation de la vie" -#~ " privée utilisée lors de l'analyse et" -#~ " du partage de données statistiques, " -#~ "garantissant la confidentialité des " -#~ "participants individuels. La DP y " -#~ "parvient en ajoutant un bruit " -#~ "statistique aux mises à jour du " -#~ "modèle, garantissant que toute information " -#~ "sur les participants individuels ne peut" -#~ " être distinguée ou réidentifiée. Cette " -#~ "technique peut être considérée comme une" -#~ " optimisation qui fournit une mesure " -#~ "quantifiable de protection de la vie " -#~ "privée." +#~ "La deuxième chose à noter est que" +#~ " la classe :code:`PytorchMNISTClient` hérite " +#~ "de :code:`fl.client.Client`, et qu'elle doit" +#~ " donc implémenter les méthodes suivantes" +#~ " :" -#~ msgid "|e5dc001d27ad460caeab669e957b3c36|" +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." #~ msgstr "" +#~ "En comparant la classe abstraite à " +#~ "sa classe dérivée :code:`PytorchMNISTClient`, " +#~ "tu remarqueras que :code:`fit` appelle " +#~ "une fonction :code:`train` et que " +#~ ":code:`evaluate` appelle une fonction " +#~ ":code:`test` :." -#~ msgid "API Reference - Flower binaries" +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" #~ msgstr "" - -#~ msgid "API Reference - flwr" -#~ msgstr "Référence pour l'API" +#~ "Ces fonctions se trouvent toutes deux" +#~ " dans le même module :code:`quickstart-" +#~ "pytorch.mnist` :" #~ msgid "" -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. This feature" -#~ " is experimental, it might change " -#~ "considerably in future versions of " -#~ "Flower." +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" #~ msgstr "" +#~ "Observe que ces fonctions encapsulent " +#~ "les boucles d'entraînement et de test" +#~ " habituelles et fournissent à :code:`fit`" +#~ " et :code:`evaluate` les statistiques " +#~ "finales pour chaque tour. Tu pourrais" +#~ " les remplacer par tes boucles " +#~ "d'entraînement et de test personnalisées " +#~ "et changer l'architecture du réseau, et" +#~ " l'ensemble de l'exemple fonctionnerait " +#~ "toujours parfaitement. En fait, pourquoi " +#~ "ne pas essayer de modifier le code" +#~ " pour en faire un exemple qui " +#~ "te plairait ?" -#~ msgid "Returns a client's set of properties." 
-#~ msgstr "" +#~ msgid "Give It a Try" +#~ msgstr "Fais un essai" #~ msgid "" -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. This feature" -#~ " is experimental, it might be change" -#~ " considerably in future versions of " -#~ "Flower." +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" #~ msgstr "" +#~ "En parcourant la description du code " +#~ "de démarrage rapide ci-dessus, tu " +#~ "auras acquis une bonne compréhension du" +#~ " fonctionnement des *clients* et des " +#~ "*serveurs* dans Flower, de l'exécution " +#~ "d'une expérience simple et de la " +#~ "structure interne d'un wrapper client. " +#~ "Voici quelques exemples que tu peux " +#~ "essayer par toi-même pour acquérir " +#~ "plus d'expérience avec Flower :" #~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "str argument called `cid`. It should " -#~ "return a single client instance of " -#~ "type ClientLike. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not attempt to carry state " -#~ "over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset,hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." #~ msgstr "" +#~ "Essaie de modifier :code:`PytorchMNISTClient` " +#~ "pour qu'il puisse accepter différentes " +#~ "architectures." #~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "str argument called `cid`. It should " -#~ "return a single client instance of " -#~ "type ClientLike. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not" -#~ msgstr "" - -#~ msgid "attempt to carry state over method invocations. Any state required by" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" #~ msgstr "" +#~ "Modifie la fonction :code:`train` pour " +#~ "qu'elle accepte différents optimiseurs" #~ msgid "" -#~ "the instance (model, dataset,hyperparameters, " -#~ "...) should be (re-)created in either" -#~ " the call to `client_fn` or the " -#~ "call to any of the client methods" -#~ " (e.g., load evaluation data in the" -#~ " `evaluate` method itself)." +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" #~ msgstr "" +#~ "Modifie la fonction :code:`test` pour " +#~ "qu'elle prouve non seulement le top-1" +#~ " (précision normale) mais aussi le " +#~ "top-5 ?" #~ msgid "" -#~ "\\frac{\\mu}{2} || w - w^t ||^2\n" -#~ "\n" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. 
Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" #~ msgstr "" +#~ "Essaie d'adapter le code à des " +#~ "images et à des ensembles de " +#~ "données plus grands. Pourquoi ne pas " +#~ "essayer de s'entraîner sur ImageNet avec" +#~ " un ResNet-50 ?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" #~ msgid "" -#~ "Adaptive Federated Optimization using Adagrad" -#~ " (FedAdagrad) [Reddi et al., 2020] " -#~ "strategy." +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." #~ msgstr "" +#~ "Flower fournit des classes d'enveloppe " +#~ "de confidentialité différentielle (DP) pour" +#~ " l'intégration facile des garanties " +#~ "centrales de DP fournies par DP-" +#~ "FedAvg dans les pipelines de formation" +#~ " définis dans n'importe lequel des " +#~ "divers cadres de ML avec lesquels " +#~ "Flower est compatible." #~ msgid "" -#~ "Adaptive Federated Optimization using Adam " -#~ "(FedAdam) [Reddi et al., 2020] strategy." +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." #~ msgstr "" +#~ "Note que ces composants sont encore " +#~ "expérimentaux, la configuration correcte du" +#~ " DP pour une tâche spécifique est " +#~ "encore un problème non résolu." #~ msgid "" -#~ "Adaptive Federated Optimization using Yogi " -#~ "(FedYogi) [Reddi et al., 2020] strategy." +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." #~ msgstr "" +#~ "Le nom DP-FedAvg est trompeur car" +#~ " il peut être appliqué à n'importe" +#~ " quel algorithme FL qui se conforme" +#~ " à la structure générale prescrite " +#~ "par la famille d'algorithmes FedOpt." -#~ msgid "Contributing Baselines" -#~ msgstr "Configuration du contributeur" +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" #~ msgid "" -#~ "Do you have a new federated " -#~ "learning paper and want to add a" -#~ " new baseline to Flower? Or do " -#~ "you want to add an experiment to" -#~ " an existing baseline paper? Great, " -#~ "we really appreciate your contribution." +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." #~ msgstr "" +#~ "DP-FedAvg, proposé à l'origine par " +#~ "McMahan et al. [mcmahan]_ et étendu " +#~ "par Andrew et al. [andrew]_, est " +#~ "essentiellement FedAvg avec les modifications" +#~ " suivantes." #~ msgid "" -#~ "The goal of Flower Baselines is to" -#~ " reproduce experiments from popular papers" -#~ " to accelerate researchers by enabling " -#~ "faster comparisons to new strategies, " -#~ "datasets, models, and federated pipelines " -#~ "in general." +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." #~ msgstr "" +#~ "**Clipping** : L'influence de la mise" +#~ " à jour de chaque client est " +#~ "limitée en l'écrêtant. 
Ceci est réalisé" +#~ " en imposant un plafond à la " +#~ "norme L2 de la mise à jour, " +#~ "en la réduisant si nécessaire." #~ msgid "" -#~ "Before you start to work on a " -#~ "new baseline or experiment, please check" -#~ " the `Flower Issues " -#~ "`_ or `Flower " -#~ "Pull Requests `_ " -#~ "to see if someone else is already" -#~ " working on it. Please open a " -#~ "new issue if you are planning to" -#~ " work on a new baseline or " -#~ "experiment with a short description of" -#~ " the corresponding paper and the " -#~ "experiment you want to contribute." -#~ msgstr "" - -#~ msgid "TL;DR: Adding a new Flower Baseline" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." #~ msgstr "" +#~ "**Bruit** : un bruit gaussien, calibré" +#~ " sur le seuil d'écrêtage, est ajouté" +#~ " à la moyenne calculée au niveau " +#~ "du serveur." #~ msgid "" -#~ "Let's say you want to contribute " -#~ "the code of your most recent " -#~ "Federated Learning publication, *FedAweseome*. " -#~ "There are only three steps necessary " -#~ "to create a new *FedAweseome* Flower " -#~ "Baseline:" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." #~ msgstr "" +#~ "Il a été démontré que la " +#~ "distribution de la norme de mise à" +#~ " jour varie d'une tâche à l'autre " +#~ "et évolue au fur et à mesure " +#~ "de la formation. C'est pourquoi nous " +#~ "utilisons une approche adaptative [andrew]_" +#~ " qui ajuste continuellement le seuil " +#~ "d'écrêtage pour suivre un quantile " +#~ "prédéfini de la distribution de la " +#~ "norme de mise à jour." -#~ msgid "**Get the Flower source code on your machine**" +#~ msgid "Simplifying Assumptions" +#~ msgstr "Simplifier les hypothèses" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." #~ msgstr "" +#~ "Nous formulons (et tentons d'appliquer) " +#~ "un certain nombre d'hypothèses qui " +#~ "doivent être satisfaites pour que le " +#~ "processus de formation réalise réellement " +#~ "les garanties :math:`(\\epsilon, \\delta)` que" +#~ " l'utilisateur a à l'esprit lorsqu'il " +#~ "configure l'installation." #~ msgid "" -#~ "Fork the Flower codebase: got to " -#~ "the `Flower GitHub repo " -#~ "`_ and fork the " -#~ "code (click the *Fork* button in " -#~ "the top-right corner and follow " -#~ "the instructions)" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." #~ msgstr "" +#~ "**Sous-échantillonnage de taille fixe** " +#~ ":Des sous-échantillons de taille fixe" +#~ " des clients doivent être prélevés à" +#~ " chaque tour, par opposition aux " +#~ "sous-échantillons de Poisson de taille " +#~ "variable." 
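
The clipping and noising steps described in the preceding entries can be summarised with a short, illustrative sketch. This is not the Flower implementation; the function names and the calibration of the noise scale (noise multiplier times clipping threshold, divided by the number of sampled clients to match an unweighted average) are assumptions made for illustration only::

    import numpy as np

    def clip_update(update, clip_norm):
        # Bound the influence of a single client by capping the L2 norm
        # of its update, scaling it down if needed (fixed clipping).
        norm = np.linalg.norm(update)
        return update * min(1.0, clip_norm / (norm + 1e-12))

    def noised_unweighted_average(updates, clip_norm, noise_multiplier):
        # Server-side noising: average the clipped updates without weights
        # and add Gaussian noise calibrated to the clipping threshold.
        clipped = [clip_update(u, clip_norm) for u in updates]
        avg = np.mean(clipped, axis=0)
        sigma = noise_multiplier * clip_norm / len(clipped)
        return avg + np.random.normal(0.0, sigma, size=avg.shape)
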
#~ msgid "" -#~ "Clone the (forked) Flower source code:" -#~ " :code:`git clone " -#~ "git@github.com:[your_github_username]/flower.git`" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." #~ msgstr "" +#~ "**Moyenne non pondérée** : Les " +#~ "contributions de tous les clients " +#~ "doivent être pondérées de façon égale" +#~ " dans l'ensemble afin que le serveur" +#~ " n'ait pas à connaître à l'avance " +#~ "la somme des poids de tous les " +#~ "clients disponibles pour la sélection." #~ msgid "" -#~ "Open the code in your favorite " -#~ "editor (e.g., using VSCode: ``cd flower" -#~ " ; code .``)" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." #~ msgstr "" +#~ "**Aucune défaillance de client** : " +#~ "L'ensemble des clients disponibles doit " +#~ "rester constant pendant toutes les " +#~ "séries de formation. En d'autres termes," +#~ " les clients ne peuvent pas " +#~ "abandonner ou échouer." -#~ msgid "**Add the FedAwesome code**" +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." #~ msgstr "" +#~ "Les deux premiers sont utiles pour " +#~ "éliminer une multitude de complications " +#~ "liées au calibrage du bruit en " +#~ "fonction du seuil d'écrêtage, tandis que" +#~ " le troisième est nécessaire pour se" +#~ " conformer aux hypothèses de l'analyse " +#~ "de la vie privée." #~ msgid "" -#~ "Add your :code:`FedAwesome` code under " -#~ ":code:`baselines/flwr_baselines/publications/[fedawesome]`" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." #~ msgstr "" +#~ "Ces restrictions sont conformes aux " +#~ "contraintes imposées par Andrew et al." +#~ " [andrew]_." -#~ msgid "Add a `pyproject.toml` with all necessary dependencies" +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." #~ msgstr "" +#~ "Contrairement à d'autres implémentations où" +#~ " l'ajout de bruit est effectué au " +#~ "niveau du serveur, tu peux configurer" +#~ " le site d'injection de bruit pour" +#~ " qu'il corresponde mieux à ton modèle" +#~ " de menace. 
Nous offrons aux " +#~ "utilisateurs la possibilité de configurer " +#~ "l'entraînement de telle sorte que chaque" +#~ " client ajoute indépendamment une petite" +#~ " quantité de bruit à la mise à" +#~ " jour écrêtée, ce qui fait que " +#~ "le simple fait d'agréger les mises " +#~ "à jour bruyantes équivaut à l'ajout " +#~ "explicite de bruit à l'agrégat non " +#~ "bruyant au niveau du serveur." -#~ msgid "Add a `README.md` describing how to use your baseline" +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." #~ msgstr "" +#~ "Pour être précis, si nous laissons " +#~ ":math:`m` être le nombre de clients " +#~ "échantillonnés à chaque tour et " +#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" +#~ " gaussien total qui doit être ajouté" +#~ " à la somme des mises à jour" +#~ " du modèle, nous pouvons utiliser des" +#~ " mathématiques simples pour montrer que " +#~ "cela équivaut à ce que chaque " +#~ "client ajoute du bruit avec l'échelle" +#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." -#~ msgid "**Open a pull request**" -#~ msgstr "" +#~ msgid "Wrapper-based approach" +#~ msgstr "Approche basée sur l'enveloppe" -#~ msgid "Stage your changes: :code:`git add .`" +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." #~ msgstr "" +#~ "L'introduction du DP dans une charge " +#~ "de travail existante peut être " +#~ "considérée comme l'ajout d'une couche de" +#~ " sécurité supplémentaire autour d'elle. " +#~ "Cela nous a incités à fournir la" +#~ " logique supplémentaire côté serveur et " +#~ "côté client nécessaire pour rendre le" +#~ " processus de formation différentiellement " +#~ "privé en tant qu'enveloppes pour les " +#~ "instances des classes abstraites " +#~ ":code:`Strategy` et :code:`NumPyClient` " +#~ "respectivement. Cette approche basée sur " +#~ "l'enveloppe a l'avantage d'être facilement " +#~ "composable avec d'autres enveloppes que " +#~ "quelqu'un pourrait contribuer à la " +#~ "bibliothèque Flower à l'avenir, par " +#~ "exemple, pour l'agrégation sécurisée. " +#~ "L'utilisation de l'héritage à la place" +#~ " peut être fastidieuse car cela " +#~ "nécessiterait la création de nouvelles " +#~ "sous-classes chaque fois qu'une nouvelle" +#~ " classe mettant en œuvre :code:`Strategy`" +#~ " ou :code:`NumPyClient` est définie." 
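
The equivalence between server-side and client-side noising stated in the preceding entry (each of the :math:`m` sampled clients adding noise with scale :math:`\sigma_\Delta/\sqrt{m}`) follows from the fact that variances of independent Gaussians add. A minimal numerical check, using illustrative values for ``m`` and ``sigma_delta``::

    import numpy as np

    m = 10              # clients sampled in a round (illustrative value)
    sigma_delta = 0.5   # noise scale required on the *sum* of the updates

    # m independent draws with scale sigma_delta / sqrt(m): their sum has
    # variance m * (sigma_delta**2 / m) = sigma_delta**2, i.e. the same
    # scale as a single server-side draw with scale sigma_delta.
    rng = np.random.default_rng(0)
    per_client = rng.normal(0.0, sigma_delta / np.sqrt(m), size=(m, 100_000))
    print(per_client.sum(axis=0).std())  # approximately sigma_delta
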
#~ msgid "" -#~ "Commit & push: :code:`git commit -m " -#~ "\"Create new FedAweseome baseline\" ; " -#~ "git push`" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." #~ msgstr "" +#~ "La première version de notre solution" +#~ " consistait à définir un décorateur " +#~ "dont le constructeur acceptait, entre " +#~ "autres, une variable à valeur booléenne" +#~ " indiquant si l'écrêtage adaptatif devait" +#~ " être activé ou non. Nous nous " +#~ "sommes rapidement rendu compte que cela" +#~ " encombrerait sa fonction :code:`__init__()` " +#~ "avec des variables correspondant aux " +#~ "hyperparamètres de l'écrêtage adaptatif qui" +#~ " resteraient inutilisées lorsque celui-ci" +#~ " était désactivé. Une implémentation plus" +#~ " propre pourrait être obtenue en " +#~ "divisant la fonctionnalité en deux " +#~ "décorateurs, :code:`DPFedAvgFixed` et " +#~ ":code:`DPFedAvgAdaptive`, le second sous-" +#~ "classant le premier. Les constructeurs " +#~ "des deux classes acceptent un paramètre" +#~ " booléen :code:`server_side_noising` qui, comme" +#~ " son nom l'indique, détermine l'endroit " +#~ "où le noising doit être effectué." #~ msgid "" -#~ "Open a pull request: go to *your*" -#~ " fork of the Flower codebase and " -#~ "create a pull request that targets " -#~ "the Flower ``main``` branch" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." #~ msgstr "" - -#~ msgid "Further reading:" -#~ msgstr "Aide supplémentaire" +#~ "Les capacités côté serveur requises pour" +#~ " la version originale de DP-FedAvg," +#~ " c'est-à-dire celle qui effectue un " +#~ "écrêtage fixe, peuvent être entièrement " +#~ "capturées à l'aide d'une logique " +#~ "d'enveloppement pour les deux méthodes " +#~ "suivantes de la classe abstraite " +#~ ":code:`Strategy`." #~ msgid "" -#~ "`GitHub docs: About forks " -#~ "`_" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." 
#~ msgstr "" +#~ ":code:`configure_fit()` : Le dictionnaire de" +#~ " configuration envoyé par la " +#~ ":code:`Strategy` enveloppée à chaque client" +#~ " doit être augmenté d'une valeur " +#~ "supplémentaire égale au seuil d'écrêtage " +#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " +#~ "si :code:`server_side_noising=true`, d'une autre " +#~ "égale à l'échelle du bruit gaussien " +#~ "qui doit être ajouté au client " +#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." #~ msgid "" -#~ "`GitHub docs: Creating a pull request" -#~ " `_" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." #~ msgstr "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." #~ msgid "" -#~ "`GitHub docs: Creating a pull request" -#~ " from a fork `_" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." #~ msgstr "" - -#~ msgid "Requirements" -#~ msgstr "Changements nécessaires" +#~ "Nous ne pouvons pas modifier directement" +#~ " la fonction d'agrégation de la " +#~ "stratégie enveloppée pour la forcer à" +#~ " ajouter du bruit à l'agrégat, c'est" +#~ " pourquoi nous simulons le bruit côté" +#~ " client pour mettre en œuvre le " +#~ "bruit côté serveur." #~ msgid "" -#~ "Contributing a new baseline is really" -#~ " easy. You only have to make " -#~ "sure that your federated learning " -#~ "experiments are running with Flower. As" -#~ " soon as you have created a " -#~ "Flower-based experiment, you can contribute" -#~ " it." 
+#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." #~ msgstr "" +#~ "Ces modifications ont été regroupées " +#~ "dans une classe appelée :code:`DPFedAvgFixed`," +#~ " dont le constructeur accepte la " +#~ "stratégie décorée, le seuil d'écrêtage " +#~ "et le nombre de clients échantillonnés" +#~ " à chaque tour comme arguments " +#~ "obligatoires. L'utilisateur est censé " +#~ "spécifier le seuil d'écrêtage car " +#~ "l'ordre de grandeur des normes de " +#~ "mise à jour dépend fortement du " +#~ "modèle formé et fournir une valeur " +#~ "par défaut serait trompeur. Le nombre" +#~ " de clients échantillonnés à chaque " +#~ "tour est nécessaire pour calculer la " +#~ "quantité de bruit qui doit être " +#~ "ajoutée à chaque mise à jour " +#~ "individuelle, que ce soit par le " +#~ "serveur ou par les clients." #~ msgid "" -#~ "It is recommended (but not required) " -#~ "to use `Hydra `_ to " -#~ "execute the experiment." +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." #~ msgstr "" +#~ "La fonctionnalité supplémentaire nécessaire " +#~ "pour faciliter l'écrêtage adaptatif a " +#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" +#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" +#~ " remplace les méthodes mentionnées ci-" +#~ "dessus pour effectuer les opérations " +#~ "suivantes." #~ msgid "" -#~ "Please make sure to add your " -#~ "baseline or experiment to the " -#~ "corresponding directory as explained in " -#~ "`Executing Baseline `_. Give your baseline the " -#~ "unique identifier. For example, :code:`fedbn`" -#~ " refers to the paper \"FedBN: " -#~ "Federated Learning on non-IID Features" -#~ " via Local Batch Normalization\" and " -#~ "creates the corresponding directory " -#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn`. Then" -#~ " you create the experiment directory " -#~ "with the experiment name. For example," -#~ " the experiment that measures the " -#~ "convergence has the directory " -#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`." -#~ " This directory contains all your " -#~ "code and a :code:`README.md` with a " -#~ "link to the paper, the paper's " -#~ "abstract, and a detailed description of" -#~ " how to execute the experiments." +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." 
#~ msgstr "" +#~ ":code:`configure_fit()` : Il intercepte le " +#~ "dict de configuration renvoyé par " +#~ ":code:`super.configure_fit()` pour y ajouter " +#~ "la paire clé-valeur " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " +#~ "client interprète comme une instruction " +#~ "d'inclure un bit indicateur (1 si " +#~ "la norme de mise à jour <= " +#~ "seuil d'écrêtage, 0 sinon) dans les " +#~ "résultats qu'il renvoie." #~ msgid "" -#~ "Please also check if :code:`pyproject.toml`" -#~ " and :code:`requirements.txt` (all in the" -#~ " directory `baselines " -#~ "`_ contain" -#~ " all required Python packages (libraries," -#~ " frameworks, ...). If the required " -#~ "Python package is not yet listed, " -#~ "please add it to :code:`pyproject.toml`. " -#~ "If you need a different version of" -#~ " a package already listed, please try" -#~ " to ensure your experiment runs with" -#~ " the existing version listed in " -#~ ":code:`pyproject.toml` (or :code:`requirements.txt`). " -#~ "If that doesn't work, open a " -#~ "GitHub Issue and request the version " -#~ "change." +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." #~ msgstr "" +#~ ":code:`aggregate_fit()` : Il fait suivre " +#~ "un appel à :code:`super.aggregate_fit()` d'un" +#~ " appel à :code:`__update_clip_norm__()`, une " +#~ "procédure qui ajuste le seuil d'écrêtage" +#~ " sur la base des bits indicateurs " +#~ "reçus des clients échantillonnés." #~ msgid "" -#~ "The experiment also needs to contain " -#~ "a file with a downloader for the" -#~ " dataset - if possible automatic. " -#~ "This can be included in one of " -#~ "the files or as an extra file." +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." #~ msgstr "" +#~ "Les capacités requises côté client " +#~ "peuvent être entièrement capturées par " +#~ "une logique de wrapper pour la " +#~ "seule méthode :code:`fit()` de la classe" +#~ " abstraite :code:`NumPyClient`. Pour être " +#~ "précis, nous devons *post-traiter* la" +#~ " mise à jour calculée par le " +#~ "client wrapped pour l'écrêter, si " +#~ "nécessaire, à la valeur seuil fournie" +#~ " par le serveur dans le cadre " +#~ "du dictionnaire de configuration. En " +#~ "plus de cela, il peut avoir besoin" +#~ " d'effectuer un travail supplémentaire si" +#~ " l'une des clés suivantes (ou les " +#~ "deux) est également présente dans le " +#~ "dict." #~ msgid "" -#~ "Finally, please add plots for all " -#~ "experimental results your code is " -#~ "running to the :code:`experiment` directory" -#~ " and include them in :code:`README.md`. " -#~ "Doing this helps others and enables " -#~ "them to recognize your contributions " -#~ "quickly." +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." 
#~ msgstr "" +#~ ":code:`dpfedavg_noise_stddev` : Génère et " +#~ "ajoute la quantité de bruit spécifiée" +#~ " à la mise à jour de " +#~ "l'écrêtage." #~ msgid "" -#~ "We are aware that a few libraries" -#~ " are available only via Conda. " -#~ "However, we want to encourage you " -#~ "to ensure that your code also runs" -#~ " well outside of Conda to make " -#~ "it more accessible to the broader " -#~ "research community." +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." #~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " +#~ "les métriques dict dans l'objet " +#~ ":code:`FitRes` renvoyé au serveur avec " +#~ "un bit indicateur, calculé comme décrit" +#~ " précédemment." -#~ msgid "Here is a checklist for adding a new baseline:" -#~ msgstr "" +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" #~ msgid "" -#~ "add required Python packages to " -#~ ":code:`pyproject.toml` or :code:`requirements.txt`" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." #~ msgstr "" +#~ "Supposons que tu te sois entraîné " +#~ "pendant :math:`n` tours avec la fraction" +#~ " d'échantillonnage :math:`q` et le " +#~ "multiplicateur de bruit :math:`z`. Afin " +#~ "de calculer la valeur :math:`epsilon` " +#~ "qui en résulterait pour un " +#~ ":math:`\\delta` particulier, le script suivant" +#~ " peut être utilisé." #~ msgid "" -#~ "add all required code under " -#~ ":code:`baselines/flwr_baselines/publications/[new_publication]`" -#~ msgstr "" - -#~ msgid "add a dataset downloader" -#~ msgstr "" - -#~ msgid "add an experiment plot" +#~ "`How to run Flower using Docker " +#~ "`_" #~ msgstr "" -#~ msgid "add a :code:`README.md`" +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" #~ msgstr "" -#~ msgid "Usability" +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "Flower is known and loved for its" -#~ " usability. Therefore, make sure that " -#~ "your baseline or experiment can be " -#~ "executed with a single command such " -#~ "as :code:`./run.sh` or :code:`python3 " -#~ "main.py`. How you organize the " -#~ "experiments and the related code " -#~ "structure is up to you as an " -#~ "author, but please keep in mind to" -#~ " make sure that other users can " -#~ "easily understand and execute your " -#~ "baseline." +#~ msgid ":py:obj:`flwr.server.driver `\\" #~ msgstr "" -#~ msgid "We look forward to your contribution!" -#~ msgstr "Exemple de première contribution" +#~ msgid "Flower driver SDK." +#~ msgstr "Serveur de Flower" -#~ msgid "flwr" -#~ msgstr "Fleur" +#~ msgid "driver" +#~ msgstr "serveur" -#~ msgid "binaries" +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Flower Baselines" -#~ msgstr "Demande pour une nouvelle Flower Baseline" - #~ msgid "" -#~ "Flower Baselines are a collection of " -#~ "organised scripts used to reproduce " -#~ "results from well-known publications or" -#~ " benchmarks. 
You can check which " -#~ "baselines already exist and/or contribute " -#~ "your own baseline." +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Flower requires `Python 3.7 `_ or above." -#~ msgstr "`Python 3.7 `_ ou plus" - -#~ msgid "|9e234df38403464899ad3aee36bf1b95|" +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ msgid "|081158351506446f9f772cb45ee68523|" +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." #~ msgstr "" -#~ msgid "|e9325042b79c45ed96b5a8d2f6f3cdc9|" +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" #~ msgstr "" -#~ msgid "|11b83bb107344db78a37266e080c4a7a|" +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" #~ msgstr "" -#~ msgid "|cd764bcf6d174a9cb62880ace9a8a6bd|" +#~ msgid "Get task results." #~ msgstr "" -#~ msgid "|5c520984cced41e38f6bb4af416c3f84|" +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" #~ msgstr "" -#~ msgid "|66941b0608644cf1a2269a194d3bc0dd|" +#~ msgid "Schedule tasks." #~ msgstr "" -#~ msgid "|4b149f3a095b402bb8890275aabc9298|" +#~ msgid "GrpcDriver" #~ msgstr "" -#~ msgid "|675cf7d3d53a4817b5d47529c0758158|" +#~ msgid ":py:obj:`connect `\\ \\(\\)" #~ msgstr "" -#~ msgid "|7ca594e16ae7477790c2e3cf096ec7cd|" +#~ msgid "Connect to the Driver API." #~ msgstr "" -#~ msgid "|d669336577b545a081d5d74169a9bc4d|" +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" #~ msgstr "" -#~ msgid "|00b3d6cde1ff410ba54eff58da4e033a|" -#~ msgstr "" +#~ msgid "Request for run ID." +#~ msgstr "Demande pour une nouvelle Flower Baseline" -#~ msgid "|29a11f5353084c1995c538f7edef71a5|" +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" #~ msgstr "" -#~ msgid "|d62eda312fd44726bb5db2b761fe7e0d|" +#~ msgid "Disconnect from the Driver API." #~ msgstr "" -#~ msgid "Using Baselines" +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" #~ msgstr "" -#~ msgid "Structure" -#~ msgstr "" +#~ msgid "Get client IDs." +#~ msgstr "Moteur client Edge" #~ msgid "" -#~ "All baselines are available in the " -#~ "directory `baselines " -#~ "`_. This " -#~ "directory has two different files:" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "Both files contain all the information" -#~ " about required Python packages (libraries," -#~ " frameworks, ...) and their versions. " -#~ "You can install each library separately" -#~ " by using :code: `pip install` or " -#~ "you can use Poetry and run " -#~ "code:`poetry install` in the directory " -#~ "where you find the :code:`pyproject.toml` " -#~ "file. After installing all requirements, " -#~ "you can start to run your " -#~ "baseline." +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" #~ msgstr "" #~ msgid "" -#~ "Go to the baseline that you want" -#~ " to execute. The directories and " -#~ "files are structured so that you " -#~ "can first find the paper with " -#~ "their unique identifier such that, for" -#~ " example, :code:`FedProx` refers to the " -#~ "paper \"Federated Optimization in " -#~ "Heterogeneous Networks\". The :code:`fedprox` " -#~ "section contains all available experiments " -#~ "from that paper." +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." 
#~ msgstr "" #~ msgid "" -#~ "The experiment area contains a " -#~ ":code:`README.md` covering the corresponding " -#~ "paper, its abstract, and goal as " -#~ "well as a detailed description of " -#~ "how to run the baseline. Please " -#~ "use the :code:`README.md` to see how " -#~ "to execute each individual baseline." +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." #~ msgstr "" -#~ msgid "Available Baselines" +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." #~ msgstr "" +#~ "Les exemples d'utilisation de Flower " +#~ "étaient auparavant regroupés avec Flower " +#~ "dans un paquet appelé ``flwr_example``. " +#~ "Nous migrons ces exemples vers des " +#~ "projets autonomes pour les rendre plus" +#~ " faciles à utiliser. Tous les " +#~ "nouveaux exemples sont basés dans le " +#~ "répertoire ``examples " +#~ "`_." + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "Démarrage rapide de TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "Exemples hérités (`flwr_example`)" #~ msgid "" -#~ "The following table lists all currently" -#~ " available baselines and the corresponding" -#~ " papers. If you want to add a" -#~ " new baseline or experiment, please " -#~ "check the `Contributing Baselines " -#~ "`_ section." +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." #~ msgstr "" +#~ "Les exemples d'utilisation dans `flwr_example`" +#~ " sont obsolètes et seront supprimés à" +#~ " l'avenir. De nouveaux exemples sont " +#~ "fournis en tant que projets autonomes" +#~ " dans `examples " +#~ "`_." -#~ msgid "Paper" -#~ msgstr "" +#~ msgid "Extra Dependencies" +#~ msgstr "Dépendances supplémentaires" -#~ msgid "Experiment" +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." #~ msgstr "" +#~ "Le noyau du framework Flower conserve" +#~ " un ensemble minimal de dépendances. " +#~ "Les exemples démontrent Flower dans le" +#~ " contexte de différents frameworks " +#~ "d'apprentissage automatique, de sorte que " +#~ "des dépendances supplémentaires doivent être" +#~ " installées avant qu'un exemple puisse " +#~ "être exécuté." -#~ msgid "Directory" -#~ msgstr "" +#~ msgid "For PyTorch examples::" +#~ msgstr "Pour les exemples de PyTorch: :" -#~ msgid "`FedAvg `_" +#~ msgid "For TensorFlow examples::" +#~ msgstr "Pour les exemples de TensorFlow : :" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." 
#~ msgstr "" +#~ "Tu peux consulter :code:`pyproject.toml` pour" +#~ " une liste complète des extras " +#~ "possibles (section :code:`[tool.poetry.extras]`)." -#~ msgid "MNIST" +#~ msgid "PyTorch Examples" +#~ msgstr "Exemples de PyTorch" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." #~ msgstr "" +#~ "Nos exemples PyTorch sont basés sur " +#~ "PyTorch 1.7. Ils devraient fonctionner " +#~ "avec d'autres versions également. Jusqu'à " +#~ "présent, nous fournissons les exemples " +#~ "suivants." -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedavg_mnist/`" +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "Classification d'images CIFAR-10" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." #~ msgstr "" +#~ "`CIFAR-10 et CIFAR-100 " +#~ "`_ sont des" +#~ " ensembles de données d'images RVB " +#~ "populaires. L'exemple Flower CIFAR-10 utilise" +#~ " PyTorch pour former un classificateur " +#~ "CNN simple dans une configuration " +#~ "d'apprentissage fédéré avec deux clients." -#~ msgid "`FedProx `_" +#~ msgid "First, start a Flower server:" +#~ msgstr "Tout d'abord, démarre un serveur Flower :" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#~ msgid "Then, start the two clients in a new terminal window:" #~ msgstr "" +#~ "Ensuite, démarre les deux clients dans" +#~ " une nouvelle fenêtre de terminal :" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedprox_mnist/`" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 Classification des images" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." #~ msgstr "" +#~ "`ImageNet-2012 `_ est " +#~ "l'un des principaux ensembles de données" +#~ " de vision par ordinateur. L'exemple " +#~ "Flower ImageNet utilise PyTorch pour " +#~ "entraîner un classificateur ResNet-18 dans " +#~ "une configuration d'apprentissage fédéré avec" +#~ " dix clients." -#~ msgid "`FedOpt `_" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." #~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/pytorch_imagenet`." + +#~ msgid "TensorFlow Examples" +#~ msgstr "Exemples de TensorFlow" -#~ msgid "sparse gradient task" +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." 
#~ msgstr "" +#~ "Nos exemples TensorFlow sont basés sur" +#~ " TensorFlow 2.0 ou une version plus" +#~ " récente. Jusqu'à présent, nous te " +#~ "proposons les exemples suivants." -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/adaptive_federated_optimization`" -#~ msgstr "" +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Classification d'images Fashion-MNIST" -#~ msgid "`FedBN `_" +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." #~ msgstr "" +#~ "nous suivons cette tradition et " +#~ "fournissons un exemple qui échantillonne " +#~ "des ensembles de données locales " +#~ "aléatoires de Fashion-MNIST et entraîne" +#~ " un modèle simple de classification " +#~ "d'images sur ces partitions." -#~ msgid "convergence rate" -#~ msgstr "" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`" -#~ msgstr "" +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" #~ msgid "" -#~ "Flower requires `Python 3.7 " -#~ "`_ or above, we " -#~ "recommend `Python 3.8 " -#~ "`_." -#~ msgstr "" -#~ "Flower nécessite `Python 3.7 " -#~ "`_ ou plus, nous " -#~ "recommandons `Python 3.8 " -#~ "`_." - -#~ msgid "|6baade94cd14454e82ead34fcc29a182|" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." #~ msgstr "" +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#~ msgid "|1209ecd819104c458d396cf665c7ed4f|" +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." #~ msgstr "" -#~ msgid "|c088b02349304344a53f3ce1464225fb|" +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" 
#~ msgstr "" -#~ msgid "|b54d50afc82a4a57a55997a9eaeb735b|" +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" #~ msgstr "" -#~ msgid "|d17b57e97b714a25b43790d4b832fd87|" +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" #~ msgstr "" -#~ msgid "|38966d05301a4854aa73c8c5033bfaab|" +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" #~ msgstr "" -#~ msgid "|231d55f7926d4a5db02dcd724ec62529|" +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" #~ msgstr "" -#~ msgid "|fb44f2e13a1b4b69b7a72234eedd13f4|" +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" #~ msgstr "" -#~ msgid "|1cfc77af5d164030942e84d14268c256|" +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" #~ msgstr "" -#~ msgid "|0d50828231a64bc08223544a2d2fa216|" +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" #~ msgstr "" -#~ msgid "|904387757ceb42fbaa1875f3e8061113|" +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" #~ msgstr "" -#~ msgid "|68608e1b7c4842458c528b431c715f5a|" +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" #~ msgstr "" -#~ msgid "|2adb106bda97480bb4b33eac472e321e|" +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" #~ msgstr "" -#~ msgid "|025f0a6f7a6145cba4bf8fa0e2495851|" +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" #~ msgstr "" -#~ msgid "Before the release" -#~ msgstr "Avant la sortie" - -#~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" #~ msgstr "" -#~ "Mettez à jour le journal des " -#~ "modifications (``changelog.md``) avec tous les" -#~ " changements pertinents qui se sont " -#~ "produits après la dernière version. Si" -#~ " la dernière version a été étiquetée" -#~ " ``v1.2.0``, vous pouvez utiliser l'URL " -#~ "suivante pour voir tous les commits " -#~ "qui ont été fusionnés dans ``main`` " -#~ "depuis lors :" -#~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" #~ msgstr "" -#~ "`GitHub : Compare v1.2.0...main " -#~ "`_" -#~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This command helps" -#~ " extract them: ``git log --format='%aN' " -#~ "v1.1.0..HEAD | sort -u``. The command" -#~ " has the same order as ``git " -#~ "shortlog``." +#~ msgid "|08cb60859b07461588fe44e55810b050|" #~ msgstr "" -#~ "Remerciez les auteurs qui ont contribué" -#~ " depuis la dernière version. Cette " -#~ "commande permet de les extraire : " -#~ "``git log --format='%aN' v1.1.0..HEAD | " -#~ "sort -u``. La commande a le même" -#~ " ordre que ``git shortlog``." #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ "Flower provides pre-made docker images" +#~ " on `Docker Hub " +#~ "`_ that include" +#~ " all necessary dependencies for running " +#~ "the server. You can also build " +#~ "your own custom docker images from " +#~ "scratch with a different version of " +#~ "Python or Ubuntu if that is what" +#~ " you need. In this guide, we " +#~ "will explain what images exist and " +#~ "how to build them locally." #~ msgstr "" -#~ "Mettez à jour l'en-tête de section" -#~ " ``changelog.md`` ``Unreleased`` pour qu'il " -#~ "contienne le numéro de version et " -#~ "la date de la version que vous " -#~ "construisez. 
Créez une demande de " -#~ "traction avec le changement." #~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``" +#~ "Currently, Flower provides two images, a" +#~ " base image and a server image. " +#~ "There will also be a client image" +#~ " soon. The base image, as the " +#~ "name suggests, contains basic dependencies " +#~ "that both the server and the " +#~ "client need. This includes system " +#~ "dependencies, Python and Python tools. " +#~ "The server image is based on the" +#~ " base image, but it additionally " +#~ "installs the Flower server using " +#~ "``pip``." #~ msgstr "" -#~ "Marquez le commit de la version " -#~ "avec le numéro de version dès que" -#~ " le PR est fusionné : ``git tag" -#~ " v0.12.3``, puis ``git push --tags``" #~ msgid "" -#~ "Build the release with ``./dev/build.sh``, " -#~ "then publish it with ``./dev/publish.sh``" +#~ "Both, base and server image are " +#~ "configured via build arguments. Through " +#~ "build arguments, we can make our " +#~ "build more flexible. For example, in " +#~ "the base image, we can specify the" +#~ " version of Python to install using" +#~ " the ``PYTHON_VERSION`` build argument. " +#~ "Some of the build arguments have " +#~ "default values, others must be specified" +#~ " when building the image. All " +#~ "available build arguments for each image" +#~ " are listed in one of the " +#~ "tables below." #~ msgstr "" -#~ "Construisez la version avec " -#~ "``./dev/build.sh``, puis publiez-la avec " -#~ "``./dev/publish.sh``" -#~ msgid "" -#~ "Create an entry in GitHub releases " -#~ "with the release notes for the " -#~ "previously tagged commit and attach the" -#~ " build artifacts (:code:`.whl` and " -#~ ":code:`.tar.gz`)." +#~ msgid "Defaults to ``flwr/server``." #~ msgstr "" -#~ "Crée une entrée dans GitHub releases " -#~ "avec les notes de version pour le" -#~ " commit précédemment étiqueté et attache" -#~ " les artefacts de construction " -#~ "(:code:`.whl` et :code:`.tar.gz`)." -#~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ msgid "``BASE_IMAGE_TAG``" #~ msgstr "" -#~ "Deuxièmement, créer un environnement virtuel" -#~ " (et l'activer). Si vous choisissez " -#~ "d'utiliser :code:`pyenv` (avec le plugin " -#~ ":code:`pyenv-virtualenv`) et que vous " -#~ "l'avez déjà installé, vous pouvez " -#~ "utiliser le script suivant (par défaut" -#~ " il utilisera :code:`Python 3.8.17`, mais" -#~ " vous pouvez le changer en " -#~ "fournissant une :code:`` spécifique)::" - -#~ msgid "server.strategy.FedAvg" -#~ msgstr "serveur.stratégie.FedAvg" - -#~ msgid "server.strategy.FedAvgM" -#~ msgstr "stratégie.serveur.FedAvgM" - -#~ msgid "server.strategy.QFedAvg" -#~ msgstr "server.strategy.QFedAvg" - -#~ msgid "server.strategy.FedOpt" -#~ msgstr "serveur.stratégie.FedOpt" -#~ msgid "server.strategy.FedProx" -#~ msgstr "serveur.stratégie.FedProx" - -#~ msgid "server.strategy.FedAdagrad" -#~ msgstr "serveur.stratégie.FedAdagrad" - -#~ msgid "server.strategy.FedAdam" -#~ msgstr "serveur.stratégie.FedAdam" +#~ msgid "The image tag of the base image." 
+#~ msgstr "" -#~ msgid "server.strategy.FedYogi" -#~ msgstr "serveur.stratégie.FedYogi" +#~ msgid "Defaults to ``py3.11-ubuntu22.04``." +#~ msgstr "" #~ msgid "" -#~ "`achiverram28`, `Adam Narozniak`, `Anass " -#~ "Anhari`, `Charles Beauville`, `Dana-Farber`," -#~ " `Daniel J. Beutel`, `Daniel Nata " -#~ "Nugraha`, `Edoardo Gabrielli`, `eunchung`, " -#~ "`Gustavo Bertoli`, `Heng Pan`, `Javier`, " -#~ "`Mahdi`, `Ruth Galindo`, `Steven Hé " -#~ "(Sīchàng)`, `Taner Topal`" +#~ "The following example creates a server" +#~ " image with the official Flower base" +#~ " image py3.11-ubuntu22.04 and Flower 1.7.0:" #~ msgstr "" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ "The name of image is ``flwr_server`` " +#~ "and the tag ``0.1.0``. Remember that " +#~ "the build arguments as well as the" +#~ " name and tag can be adapted to" +#~ " your needs. These values serve as" +#~ " examples only." #~ msgstr "" -#~ "Chargeons maintenant l'ensemble de formation" -#~ " et de test CIFAR-10, partitionnons-" -#~ "les en dix ensembles de données " -#~ "plus petits (chacun divisé en ensemble" -#~ " de formation et de validation), et" -#~ " enveloppons les partitions résultantes en" -#~ " créant un PyTorch ``DataLoader`` pour " -#~ "chacun d'entre eux :" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY`` " +#~ "and ``BASE_IMAGE_TAG`` build arguments. The" +#~ " value of ``BASE_REPOSITORY`` must match" +#~ " the name of your image and the" +#~ " value of ``BASE_IMAGE_TAG`` must match " +#~ "the tag of your image." #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " horizontal en utilisant XGBoost et " -#~ "Flower !" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." #~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet `_ pour en " -#~ "savoir plus." -#~ msgid "|3ff4c820a01d4a5abb022617de537c54|" -#~ msgstr "" +#~ msgid "Open a PR (as shown above)" +#~ msgstr "Ouvre un RP (comme indiqué ci-dessus)" -#~ msgid "|7f1889391ad448e2a65920165f0d798c|" +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "Add CI job to deploy the staging system when the `main` branch changes" + +#~ msgid "Changelog entry" +#~ msgstr "Changelog" + +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." 
#~ msgstr "" -#~ msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" +#~ msgid "" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" #~ msgstr "" -#~ msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." #~ msgstr "" -#~ msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." #~ msgstr "" -#~ msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." #~ msgstr "" -#~ msgid "|5c62032f589a457bb37b5fee5b2adbde|" +#~ msgid "" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" #~ msgstr "" -#~ msgid "|f154df1846dd44f79a94f1dc3ae8b088|" +#~ msgid " is for classifying a PR as a general improvement." #~ msgstr "" -#~ msgid "|9d20be8160f7451fb0f33b194506503f|" +#~ msgid " is to not add the PR to the changelog" #~ msgstr "" -#~ msgid "|3d949f76988443c59990d2e64f05c386|" +#~ msgid " is to add a general baselines change to the PR" #~ msgstr "" -#~ msgid "|526c6d9140f6404f8a226d9056327b3b|" +#~ msgid " is to add a general examples change to the PR" #~ msgstr "" -#~ msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" +#~ msgid " is to add a general sdk change to the PR" #~ msgstr "" -#~ msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" +#~ msgid " is to add a general simulations change to the PR" #~ msgstr "" -#~ msgid "|c76452ae1ed84965be7ef23c72b95845|" +#~ msgid "Note that only one token should be used." #~ msgstr "" #~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" #~ msgstr "" #~ msgid "" -#~ "Since `Flower 1.5 `_ we have " -#~ "introduced translations to our doc " -#~ "pages, but, as you might have " -#~ "noticed, the translations are often " -#~ "imperfect. If you speak languages other" -#~ " than English, you might be able " -#~ "to help us in our effort to " -#~ "make Federated Learning accessible to as" -#~ " many people as possible by " -#~ "contributing to those translations! This " -#~ "might also be a great opportunity " -#~ "for those wanting to become open " -#~ "source contributors with little prerequistes." +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" #~ msgstr "" #~ msgid "" -#~ "You input your translation in the " -#~ "textbox at the top and then, once" -#~ " you are happy with it, you " -#~ "either press ``Save and continue`` (to" -#~ " save the translation and go to " -#~ "the next untranslated string), ``Save " -#~ "and stay`` (to save the translation " -#~ "and stay on the same page), " -#~ "``Suggest`` (to add your translation to" -#~ " suggestions for other users to " -#~ "view), or ``Skip`` (to go to the" -#~ " next untranslated string without saving" -#~ " anything)." 
+#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." #~ msgstr "" -#~ msgid "Example: Walk-Through PyTorch & MNIST" -#~ msgstr "Exemple : PyTorch et MNIST" - #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre," -#~ " comment former un réseau neuronal " -#~ "convolutif sur MNIST en utilisant Flower" -#~ " et PyTorch." #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" -#~ "Puisque nous voulons utiliser PyTorch " -#~ "pour résoudre une tâche de vision " -#~ "par ordinateur, installons PyTorch et la" -#~ " bibliothèque **torchvision** :" - -#~ msgid "Ready... Set... Train!" -#~ msgstr "Prêts... prêts... entraînez-vous !" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" -#~ "Maintenant que nous avons installé " -#~ "toutes nos dépendances, lançons un " -#~ "simple entraînement distribué avec deux " -#~ "clients et un serveur. Notre procédure" -#~ " d'entraînement et l'architecture de notre" -#~ " réseau sont basées sur l'exemple " -#~ "MNIST de base de PyTorch " -#~ "`_. Cela" -#~ " te permettra de voir à quel " -#~ "point il est facile d'envelopper ton " -#~ "code avec Flower et de commencer " -#~ "l'entraînement de manière fédérée. Nous " -#~ "te fournissons deux scripts d'aide, à" -#~ " savoir *run-server.sh*, et *run-" -#~ "clients.sh*. N'aie pas peur de regarder" -#~ " à l'intérieur, ils sont assez " -#~ "simples =)." #~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" -#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." -#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." 
+#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" #~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." #~ msgstr "" -#~ "Et voilà ! Tu devrais voir la " -#~ "procédure d'entraînement et, après quelques" -#~ " itérations, la précision du test " -#~ "pour chaque client." -#~ msgid "Now, let's see what is really happening inside." -#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "Exemple : MXNet - Exécuter MXNet Federated" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" -#~ "Dans le script d'aide au serveur " -#~ "*run-server.sh*, tu trouveras le code " -#~ "suivant qui exécute le fichier " -#~ ":code:`server.py`" +#~ "Ce tutoriel te montrera comment utiliser" +#~ " Flower pour construire une version " +#~ "fédérée d'une charge de travail MXNet" +#~ " existante. Nous utilisons MXNet pour " +#~ "former un modèle séquentiel sur " +#~ "l'ensemble de données MNIST. Nous " +#~ "structurerons l'exemple de la même " +#~ "manière que notre présentation `PyTorch " +#~ "- De la centralisation à la " +#~ "fédération `_. " +#~ "MXNet et PyTorch sont très similaires" +#~ " et une très bonne comparaison entre" +#~ " MXNet et PyTorch est donnée ici " +#~ "`_. Tout " +#~ "d'abord, nous construisons une approche " +#~ "de formation centralisée basée sur le" +#~ " tutoriel `Handandwritten Digit Recognition " +#~ "`_." +#~ " Ensuite, nous nous basons sur le " +#~ "code de formation centralisé pour " +#~ "exécuter la formation de manière " +#~ "fédérée." #~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" #~ msgstr "" -#~ "Nous pouvons aller un peu plus " -#~ "loin et voir que :code:`server.py` lance" -#~ " simplement un serveur qui coordonnera " -#~ "trois tours de formation. 
Flower Les " -#~ "serveurs sont très personnalisables, mais " -#~ "pour les charges de travail simples, " -#~ "nous pouvons démarrer un serveur à " -#~ "l'aide de la fonction :ref:`start_server " -#~ "` et " -#~ "laisser toutes les possibilités de " -#~ "configuration à leurs valeurs par " -#~ "défaut, comme on peut le voir " -#~ "ci-dessous." +#~ "Avant de commencer à configurer notre" +#~ " exemple MXNet, nous installons les " +#~ "paquets :code:`mxnet` et :code:`flwr` :" + +#~ msgid "MNIST Training with MXNet" +#~ msgstr "Formation MNIST avec MXNet" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." #~ msgstr "" -#~ "Ensuite, jetons un coup d'œil au " -#~ "fichier *run-clients.sh*. Tu verras " -#~ "qu'il contient la boucle principale qui" -#~ " démarre un ensemble de *clients*." +#~ "Nous commençons par une brève " +#~ "description du code d'entraînement centralisé" +#~ " basé sur un modèle :code:`Sequential`. " +#~ "Si tu veux une explication plus " +#~ "approfondie de ce qui se passe, " +#~ "jette un coup d'œil au tutoriel " +#~ "officiel `MXNet " +#~ "`_." #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." #~ msgstr "" -#~ "**cid** : c'est l'identifiant du client." -#~ " C'est un nombre entier qui identifie" -#~ " de façon unique l'identifiant du " -#~ "client." - -#~ msgid "**sever_address**: String that identifies IP and port of the server." -#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." +#~ "Créons un nouveau fichier appelé " +#~ ":code:`mxnet_mnist.py` avec tous les " +#~ "composants requis pour un apprentissage " +#~ "MNIST traditionnel (centralisé). Tout d'abord," +#~ " le package MXNet :code:`mxnet` doit " +#~ "être importé. Tu peux voir que " +#~ "nous n'avons pas encore importé le " +#~ "package :code:`flwr` pour l'apprentissage " +#~ "fédéré. Cela sera fait plus tard." #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." #~ msgstr "" -#~ "**Cette information n'est pas requise " -#~ "par le client, mais elle nous aide" -#~ " à partitionner l'ensemble de données " -#~ "MNIST original pour nous assurer que " -#~ "chaque client travaille sur des sous-" -#~ "ensembles uniques des ensembles *formation*" -#~ " et *test*." +#~ "La fonction :code:`load_data()` charge les " +#~ "ensembles d'entraînement et de test " +#~ "MNIST." 
#~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." #~ msgstr "" -#~ "Encore une fois, nous pouvons aller " -#~ "plus loin et regarder dans " -#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" -#~ " avoir parcouru le code d'analyse des" -#~ " arguments au début de notre fonction" -#~ " :code:`main`, tu trouveras un appel " -#~ "à :code:`mnist.load_data`. Cette fonction est" -#~ " responsable du partitionnement des " -#~ "ensembles de données MNIST originaux " -#~ "(*training* et *test*) et renvoie un " -#~ ":code:`torch.utils.data.DataLoader` s pour chacun" -#~ " d'entre eux. Nous instancions ensuite " -#~ "un objet :code:`PytorchMNISTClient` avec notre" -#~ " ID client, nos DataLoaders, le " -#~ "nombre d'époques dans chaque tour et " -#~ "le périphérique que nous voulons " -#~ "utiliser pour l'entraînement (CPU ou " -#~ "GPU)." +#~ "Comme nous l'avons déjà mentionné, nous" +#~ " utiliserons l'ensemble de données MNIST" +#~ " pour cette charge de travail " +#~ "d'apprentissage automatique. L'architecture du " +#~ "modèle (un modèle :code:`Séquentiel` très " +#~ "simple) est définie dans :code:`model()`." #~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." #~ msgstr "" -#~ "L'objet :code:`PytorchMNISTClient` est finalement" -#~ " transmis à :code:`fl.client.start_client` avec" -#~ " l'adresse du serveur lorsque le " -#~ "processus de formation commence." +#~ "Nous devons maintenant définir la " +#~ "formation (fonction :code:`train()`) qui passe" +#~ " en boucle sur l'ensemble de la " +#~ "formation et mesure la perte pour " +#~ "chaque lot d'exemples de formation." -#~ msgid "A Closer Look" -#~ msgstr "Regarder de plus près" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "" +#~ "L'évaluation du modèle est définie dans" +#~ " la fonction :code:`test()`. Cette fonction" +#~ " passe en boucle sur tous les " +#~ "échantillons de test et mesure la " +#~ "perte et la précision du modèle en" +#~ " fonction de l'ensemble des données " +#~ "de test." 
#~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." #~ msgstr "" -#~ "Maintenant, examinons de près le " -#~ ":code:`PytorchMNISTClient` à l'intérieur du " -#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " -#~ "voyons ce qu'il fait :" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, " +#~ "l'entraînement et l'évaluation, nous pouvons" +#~ " tout assembler et entraîner notre " +#~ "modèle sur MNIST. Note que le " +#~ "dispositif GPU/CPU pour l'entraînement et " +#~ "le test est défini dans le " +#~ ":code:`ctx` (contexte)." -#~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" #~ msgstr "" -#~ "La première chose à remarquer est " -#~ "que :code:`PytorchMNISTClient` instancie un " -#~ "modèle CNN dans son constructeur" +#~ "Tu peux maintenant exécuter ta charge" +#~ " de travail (centralisée) d'apprentissage " +#~ "automatique MXNet :" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." #~ msgstr "" -#~ "Le code du CNN est disponible sous" -#~ " :code:`quickstart-pytorch.mnist` et il est" -#~ " reproduit ci-dessous. Il s'agit du" -#~ " même réseau que celui que l'on " -#~ "trouve dans `Exemple basique de MNIST" -#~ " `_." +#~ "Jusqu'à présent, tout cela devrait te" +#~ " sembler assez familier si tu as " +#~ "déjà utilisé MXNet (ou même PyTorch)." +#~ " Passons à l'étape suivante et " +#~ "utilisons ce que nous avons construit" +#~ " pour créer un simple système " +#~ "d'apprentissage fédéré composé d'un serveur" +#~ " et de deux clients." + +#~ msgid "MXNet meets Flower" +#~ msgstr "MXNet rencontre Flower" #~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." 
#~ msgstr "" -#~ "La deuxième chose à noter est que" -#~ " la classe :code:`PytorchMNISTClient` hérite " -#~ "de :code:`fl.client.Client`, et qu'elle doit" -#~ " donc implémenter les méthodes suivantes" -#~ " :" +#~ "Jusqu'à présent, il n'était pas facile" +#~ " d'utiliser les charges de travail " +#~ "MXNet pour l'apprentissage fédéré car " +#~ "l'apprentissage fédéré n'est pas pris en" +#~ " charge dans MXNet. Comme Flower est" +#~ " totalement agnostique vis-à-vis du cadre" +#~ " d'apprentissage automatique sous-jacent, " +#~ "il peut être utilisé pour fédérer " +#~ "des charges de travail d'apprentissage " +#~ "automatique arbitraires. Cette section te " +#~ "montrera comment Flower peut être " +#~ "utilisé pour fédérer notre charge de " +#~ "travail MXNet centralisée." #~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." #~ msgstr "" -#~ "En comparant la classe abstraite à " -#~ "sa classe dérivée :code:`PytorchMNISTClient`, " -#~ "tu remarqueras que :code:`fit` appelle " -#~ "une fonction :code:`train` et que " -#~ ":code:`evaluate` appelle une fonction " -#~ ":code:`test` :." +#~ "Le concept pour fédérer une charge " +#~ "de travail existante est toujours le " +#~ "même et facile à comprendre. Nous " +#~ "devons démarrer un *serveur* et ensuite" +#~ " utiliser le code dans " +#~ ":code:`mxnet_mnist.py` pour les *clients* qui" +#~ " sont connectés au *serveur*. Le " +#~ "*serveur* envoie les paramètres du " +#~ "modèle aux clients. Les *clients* " +#~ "exécutent la formation et mettent à " +#~ "jour les paramètres. Les paramètres mis" +#~ " à jour sont renvoyés au *serveur*" +#~ " qui fait la moyenne de toutes " +#~ "les mises à jour de paramètres " +#~ "reçues. Ceci décrit un tour du " +#~ "processus d'apprentissage fédéré et nous " +#~ "répétons cette opération pour plusieurs " +#~ "tours." #~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" #~ msgstr "" -#~ "Ces fonctions se trouvent toutes deux" -#~ " dans le même module :code:`quickstart-" -#~ "pytorch.mnist` :" +#~ "Enfin, nous allons définir la logique" +#~ " de notre *client* dans :code:`client.py`" +#~ " et nous appuyer sur l'entraînement " +#~ "MXNet défini précédemment dans " +#~ ":code:`mxnet_mnist.py`. 
Notre *client* doit " +#~ "importer :code:`flwr`, mais aussi " +#~ ":code:`mxnet` pour mettre à jour les " +#~ "paramètres de notre modèle MXNet :" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" #~ msgstr "" -#~ "Observe que ces fonctions encapsulent " -#~ "les boucles d'entraînement et de test" -#~ " habituelles et fournissent à :code:`fit`" -#~ " et :code:`evaluate` les statistiques " -#~ "finales pour chaque tour. Tu pourrais" -#~ " les remplacer par tes boucles " -#~ "d'entraînement et de test personnalisées " -#~ "et changer l'architecture du réseau, et" -#~ " l'ensemble de l'exemple fonctionnerait " -#~ "toujours parfaitement. En fait, pourquoi " -#~ "ne pas essayer de modifier le code" -#~ " pour en faire un exemple qui " -#~ "te plairait ?" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" -#~ msgid "Give It a Try" -#~ msgstr "Fais un essai" +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "transforme les :code:`NDArray` du MXNet en :code:`ndarray` de NumPy" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. 
Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" -#~ msgstr "" -#~ "En parcourant la description du code " -#~ "de démarrage rapide ci-dessus, tu " -#~ "auras acquis une bonne compréhension du" -#~ " fonctionnement des *clients* et des " -#~ "*serveurs* dans Flower, de l'exécution " -#~ "d'une expérience simple et de la " -#~ "structure interne d'un wrapper client. " -#~ "Voici quelques exemples que tu peux " -#~ "essayer par toi-même pour acquérir " -#~ "plus d'expérience avec Flower :" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." +#~ msgstr "" +#~ "La partie la plus difficile est de" +#~ " transformer les paramètres MXNet de " +#~ ":code:`NDArray` en :code:`NumPy Arrays` pour" +#~ " les rendre lisibles pour Flower." #~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" -#~ "Essaie de modifier :code:`PytorchMNISTClient` " -#~ "pour qu'il puisse accepter différentes " -#~ "architectures." +#~ "Les deux méthodes :code:`NumPyClient` " +#~ ":code:`fit` et :code:`evaluate` utilisent les" +#~ " fonctions :code:`train()` et :code:`test()` " +#~ "définies précédemment dans :code:`mxnet_mnist.py`." +#~ " Ce que nous faisons vraiment ici," +#~ " c'est que nous indiquons à Flower," +#~ " par le biais de notre sous-" +#~ "classe :code:`NumPyClient`, laquelle de nos" +#~ " fonctions déjà définies doit être " +#~ "appelée pour l'entraînement et l'évaluation." +#~ " Nous avons inclus des annotations de" +#~ " type pour te donner une meilleure" +#~ " compréhension des types de données " +#~ "qui sont transmis." #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." #~ msgstr "" -#~ "Modifie la fonction :code:`train` pour " -#~ "qu'elle accepte différents optimiseurs" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, la " +#~ "formation et l'évaluation, nous pouvons " +#~ "tout rassembler et former notre modèle" +#~ " :code:`Sequential` sur MNIST." #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" #~ msgstr "" -#~ "Modifie la fonction :code:`test` pour " -#~ "qu'elle prouve non seulement le top-1" -#~ " (précision normale) mais aussi le " -#~ "top-5 ?" 
+#~ "dans chaque fenêtre (assure-toi que " +#~ "le serveur est toujours en cours " +#~ "d'exécution avant de le faire) et " +#~ "tu verras ton projet MXNet exécuter " +#~ "l'apprentissage fédéré sur deux clients. " +#~ "Félicitations !" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" #~ msgstr "" -#~ "Essaie d'adapter le code à des " -#~ "images et à des ensembles de " -#~ "données plus grands. Pourquoi ne pas " -#~ "essayer de s'entraîner sur ImageNet avec" -#~ " un ResNet-50 ?" +#~ "Le code source complet de cet " +#~ "exemple : `MXNet : From Centralized " +#~ "To Federated (Code) " +#~ "`_. Notre " +#~ "exemple est bien sûr un peu trop" +#~ " simplifié parce que les deux clients" +#~ " chargent exactement le même ensemble " +#~ "de données, ce qui n'est pas " +#~ "réaliste. Tu es maintenant prêt à " +#~ "explorer ce sujet plus en profondeur." +#~ " Pourquoi ne pas utiliser un CNN " +#~ "ou un ensemble de données différent " +#~ "? Pourquoi ne pas ajouter d'autres " +#~ "clients ?" -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" +#~ msgid "with the following command sequence:" +#~ msgstr "avec la séquence de commandes suivante :" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." #~ msgstr "" -#~ "Flower fournit des classes d'enveloppe " -#~ "de confidentialité différentielle (DP) pour" -#~ " l'intégration facile des garanties " -#~ "centrales de DP fournies par DP-" -#~ "FedAvg dans les pipelines de formation" -#~ " définis dans n'importe lequel des " -#~ "divers cadres de ML avec lesquels " -#~ "Flower est compatible." +#~ "Si tu es un chercheur, tu peux " +#~ "très bien utiliser les certificats " +#~ "auto-signés générés à l'aide des " +#~ "scripts qui font partie de ce " +#~ "guide." #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." #~ msgstr "" -#~ "Note que ces composants sont encore " -#~ "expérimentaux, la configuration correcte du" -#~ " DP pour une tâche spécifique est " -#~ "encore un problème non résolu." +#~ "Nous allons maintenant montrer comment " +#~ "écrire un serveur qui utilise les " +#~ "scripts générés précédemment." 
#~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." #~ msgstr "" -#~ "Le nom DP-FedAvg est trompeur car" -#~ " il peut être appliqué à n'importe" -#~ " quel algorithme FL qui se conforme" -#~ " à la structure générale prescrite " -#~ "par la famille d'algorithmes FedOpt." - -#~ msgid "DP-FedAvg" -#~ msgstr "DP-FedAvg" +#~ "Lorsqu'il fournit des certificats, le " +#~ "serveur attend un tuple de trois " +#~ "certificats. :code:`Path` peut être utilisé" +#~ " pour lire facilement le contenu de" +#~ " ces fichiers en chaînes d'octets, ce" +#~ " qui est le type de données " +#~ "attendu par :code:`start_server`." #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." #~ msgstr "" -#~ "DP-FedAvg, proposé à l'origine par " -#~ "McMahan et al. [mcmahan]_ et étendu " -#~ "par Andrew et al. [andrew]_, est " -#~ "essentiellement FedAvg avec les modifications" -#~ " suivantes." + +#~ msgid "Flower server" +#~ msgstr "Serveur de Flower" #~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." #~ msgstr "" -#~ "**Clipping** : L'influence de la mise" -#~ " à jour de chaque client est " -#~ "limitée en l'écrêtant. Ceci est réalisé" -#~ " en imposant un plafond à la " -#~ "norme L2 de la mise à jour, " -#~ "en la réduisant si nécessaire." #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." #~ msgstr "" -#~ "**Bruit** : un bruit gaussien, calibré" -#~ " sur le seuil d'écrêtage, est ajouté" -#~ " à la moyenne calculée au niveau " -#~ "du serveur." #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." 
-#~ msgstr "" -#~ "Il a été démontré que la " -#~ "distribution de la norme de mise à" -#~ " jour varie d'une tâche à l'autre " -#~ "et évolue au fur et à mesure " -#~ "de la formation. C'est pourquoi nous " -#~ "utilisons une approche adaptative [andrew]_" -#~ " qui ajuste continuellement le seuil " -#~ "d'écrêtage pour suivre un quantile " -#~ "prédéfini de la distribution de la " -#~ "norme de mise à jour." +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" -#~ msgid "Simplifying Assumptions" -#~ msgstr "Simplifier les hypothèses" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." +#~ msgstr "" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" #~ msgstr "" -#~ "Nous formulons (et tentons d'appliquer) " -#~ "un certain nombre d'hypothèses qui " -#~ "doivent être satisfaites pour que le " -#~ "processus de formation réalise réellement " -#~ "les garanties :math:`(\\epsilon, \\delta)` que" -#~ " l'utilisateur a à l'esprit lorsqu'il " -#~ "configure l'installation." #~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." +#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" -#~ "**Sous-échantillonnage de taille fixe** " -#~ ":Des sous-échantillons de taille fixe" -#~ " des clients doivent être prélevés à" -#~ " chaque tour, par opposition aux " -#~ "sous-échantillons de Poisson de taille " -#~ "variable." #~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." 
#~ msgstr "" -#~ "**Moyenne non pondérée** : Les " -#~ "contributions de tous les clients " -#~ "doivent être pondérées de façon égale" -#~ " dans l'ensemble afin que le serveur" -#~ " n'ait pas à connaître à l'avance " -#~ "la somme des poids de tous les " -#~ "clients disponibles pour la sélection." #~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." #~ msgstr "" -#~ "**Aucune défaillance de client** : " -#~ "L'ensemble des clients disponibles doit " -#~ "rester constant pendant toutes les " -#~ "séries de formation. En d'autres termes," -#~ " les clients ne peuvent pas " -#~ "abandonner ou échouer." #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." #~ msgstr "" -#~ "Les deux premiers sont utiles pour " -#~ "éliminer une multitude de complications " -#~ "liées au calibrage du bruit en " -#~ "fonction du seuil d'écrêtage, tandis que" -#~ " le troisième est nécessaire pour se" -#~ " conformer aux hypothèses de l'analyse " -#~ "de la vie privée." #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." #~ msgstr "" -#~ "Ces restrictions sont conformes aux " -#~ "contraintes imposées par Andrew et al." -#~ " [andrew]_." -#~ msgid "Customizable Responsibility for Noise injection" -#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" +#~ msgid "Using a different Flower or Python version" +#~ msgstr "" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." #~ msgstr "" -#~ "Contrairement à d'autres implémentations où" -#~ " l'ajout de bruit est effectué au " -#~ "niveau du serveur, tu peux configurer" -#~ " le site d'injection de bruit pour" -#~ " qu'il corresponde mieux à ton modèle" -#~ " de menace. 
Nous offrons aux " -#~ "utilisateurs la possibilité de configurer " -#~ "l'entraînement de telle sorte que chaque" -#~ " client ajoute indépendamment une petite" -#~ " quantité de bruit à la mise à" -#~ " jour écrêtée, ce qui fait que " -#~ "le simple fait d'agréger les mises " -#~ "à jour bruyantes équivaut à l'ajout " -#~ "explicite de bruit à l'agrégat non " -#~ "bruyant au niveau du serveur." #~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" #~ msgstr "" -#~ "Pour être précis, si nous laissons " -#~ ":math:`m` être le nombre de clients " -#~ "échantillonnés à chaque tour et " -#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" -#~ " gaussien total qui doit être ajouté" -#~ " à la somme des mises à jour" -#~ " du modèle, nous pouvons utiliser des" -#~ " mathématiques simples pour montrer que " -#~ "cela équivaut à ce que chaque " -#~ "client ajoute du bruit avec l'échelle" -#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." -#~ msgid "Wrapper-based approach" -#~ msgstr "Approche basée sur l'enveloppe" +#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgstr "" + +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" + +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." -#~ msgstr "" -#~ "L'introduction du DP dans une charge " -#~ "de travail existante peut être " -#~ "considérée comme l'ajout d'une couche de" -#~ " sécurité supplémentaire autour d'elle. " -#~ "Cela nous a incités à fournir la" -#~ " logique supplémentaire côté serveur et " -#~ "côté client nécessaire pour rendre le" -#~ " processus de formation différentiellement " -#~ "privé en tant qu'enveloppes pour les " -#~ "instances des classes abstraites " -#~ ":code:`Strategy` et :code:`NumPyClient` " -#~ "respectivement. Cette approche basée sur " -#~ "l'enveloppe a l'avantage d'être facilement " -#~ "composable avec d'autres enveloppes que " -#~ "quelqu'un pourrait contribuer à la " -#~ "bibliothèque Flower à l'avenir, par " -#~ "exemple, pour l'agrégation sécurisée. 
" -#~ "L'utilisation de l'héritage à la place" -#~ " peut être fastidieuse car cela " -#~ "nécessiterait la création de nouvelles " -#~ "sous-classes chaque fois qu'une nouvelle" -#~ " classe mettant en œuvre :code:`Strategy`" -#~ " ou :code:`NumPyClient` est définie." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ msgstr "" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" #~ msgstr "" -#~ "La première version de notre solution" -#~ " consistait à définir un décorateur " -#~ "dont le constructeur acceptait, entre " -#~ "autres, une variable à valeur booléenne" -#~ " indiquant si l'écrêtage adaptatif devait" -#~ " être activé ou non. Nous nous " -#~ "sommes rapidement rendu compte que cela" -#~ " encombrerait sa fonction :code:`__init__()` " -#~ "avec des variables correspondant aux " -#~ "hyperparamètres de l'écrêtage adaptatif qui" -#~ " resteraient inutilisées lorsque celui-ci" -#~ " était désactivé. Une implémentation plus" -#~ " propre pourrait être obtenue en " -#~ "divisant la fonctionnalité en deux " -#~ "décorateurs, :code:`DPFedAvgFixed` et " -#~ ":code:`DPFedAvgAdaptive`, le second sous-" -#~ "classant le premier. Les constructeurs " -#~ "des deux classes acceptent un paramètre" -#~ " booléen :code:`server_side_noising` qui, comme" -#~ " son nom l'indique, détermine l'endroit " -#~ "où le noising doit être effectué." #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." +#~ ":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" #~ msgstr "" -#~ "Les capacités côté serveur requises pour" -#~ " la version originale de DP-FedAvg," -#~ " c'est-à-dire celle qui effectue un " -#~ "écrêtage fixe, peuvent être entièrement " -#~ "capturées à l'aide d'une logique " -#~ "d'enveloppement pour les deux méthodes " -#~ "suivantes de la classe abstraite " -#~ ":code:`Strategy`." 
#~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" #~ msgstr "" -#~ ":code:`configure_fit()` : Le dictionnaire de" -#~ " configuration envoyé par la " -#~ ":code:`Strategy` enveloppée à chaque client" -#~ " doit être augmenté d'une valeur " -#~ "supplémentaire égale au seuil d'écrêtage " -#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " -#~ "si :code:`server_side_noising=true`, d'une autre " -#~ "égale à l'échelle du bruit gaussien " -#~ "qui doit être ajouté au client " -#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ msgid "Run Flower server (Driver API and Fleet API)." #~ msgstr "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." 
#~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ "Nous ne pouvons pas modifier directement" -#~ " la fonction d'agrégation de la " -#~ "stratégie enveloppée pour la forcer à" -#~ " ajouter du bruit à l'agrégat, c'est" -#~ " pourquoi nous simulons le bruit côté" -#~ " client pour mettre en œuvre le " -#~ "bruit côté serveur." + +#~ msgid "Start a Flower Driver API server." +#~ msgstr "Tout d'abord, démarre un serveur Flower :" #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" +#~ "Flower 1.0 : ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" + +#~ msgid "`Driver` class provides an interface to the Driver API." #~ msgstr "" -#~ "Ces modifications ont été regroupées " -#~ "dans une classe appelée :code:`DPFedAvgFixed`," -#~ " dont le constructeur accepte la " -#~ "stratégie décorée, le seuil d'écrêtage " -#~ "et le nombre de clients échantillonnés" -#~ " à chaque tour comme arguments " -#~ "obligatoires. L'utilisateur est censé " -#~ "spécifier le seuil d'écrêtage car " -#~ "l'ordre de grandeur des normes de " -#~ "mise à jour dépend fortement du " -#~ "modèle formé et fournir une valeur " -#~ "par défaut serait trompeur. Le nombre" -#~ " de clients échantillonnés à chaque " -#~ "tour est nécessaire pour calculer la " -#~ "quantité de bruit qui doit être " -#~ "ajoutée à chaque mise à jour " -#~ "individuelle, que ce soit par le " -#~ "serveur ou par les clients." #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "" + +#~ msgid "Disconnect from the SuperLink if connected." #~ msgstr "" -#~ "La fonctionnalité supplémentaire nécessaire " -#~ "pour faciliter l'écrêtage adaptatif a " -#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" -#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" -#~ " remplace les méthodes mentionnées ci-" -#~ "dessus pour effectuer les opérations " -#~ "suivantes." 
#~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" #~ msgstr "" -#~ ":code:`configure_fit()` : Il intercepte le " -#~ "dict de configuration renvoyé par " -#~ ":code:`super.configure_fit()` pour y ajouter " -#~ "la paire clé-valeur " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " -#~ "client interprète comme une instruction " -#~ "d'inclure un bit indicateur (1 si " -#~ "la norme de mise à jour <= " -#~ "seuil d'écrêtage, 0 sinon) dans les " -#~ "résultats qu'il renvoie." #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." +#~ msgstr "" + +#~ msgid "start\\_driver" +#~ msgstr "start_client" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." #~ msgstr "" -#~ ":code:`aggregate_fit()` : Il fait suivre " -#~ "un appel à :code:`super.aggregate_fit()` d'un" -#~ " appel à :code:`__update_clip_norm__()`, une " -#~ "procédure qui ajuste le seuil d'écrêtage" -#~ " sur la base des bits indicateurs " -#~ "reçus des clients échantillonnés." #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." #~ msgstr "" -#~ "Les capacités requises côté client " -#~ "peuvent être entièrement capturées par " -#~ "une logique de wrapper pour la " -#~ "seule méthode :code:`fit()` de la classe" -#~ " abstraite :code:`NumPyClient`. Pour être " -#~ "précis, nous devons *post-traiter* la" -#~ " mise à jour calculée par le " -#~ "client wrapped pour l'écrêter, si " -#~ "nécessaire, à la valeur seuil fournie" -#~ " par le serveur dans le cadre " -#~ "du dictionnaire de configuration. En " -#~ "plus de cela, il peut avoir besoin" -#~ " d'effectuer un travail supplémentaire si" -#~ " l'une des clés suivantes (ou les " -#~ "deux) est également présente dans le " -#~ "dict." #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." 
+#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." #~ msgstr "" -#~ ":code:`dpfedavg_noise_stddev` : Génère et " -#~ "ajoute la quantité de bruit spécifiée" -#~ " à la mise à jour de " -#~ "l'écrêtage." -#~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ msgid "The Driver object to use." #~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " -#~ "les métriques dict dans l'objet " -#~ ":code:`FitRes` renvoyé au serveur avec " -#~ "un bit indicateur, calculé comme décrit" -#~ " précédemment." -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" #~ msgstr "" -#~ "Supposons que tu te sois entraîné " -#~ "pendant :math:`n` tours avec la fraction" -#~ " d'échantillonnage :math:`q` et le " -#~ "multiplicateur de bruit :math:`z`. Afin " -#~ "de calculer la valeur :math:`epsilon` " -#~ "qui en résulterait pour un " -#~ ":math:`\\delta` particulier, le script suivant" -#~ " peut être utilisé." + +#~ msgid "Run Simulation Engine from the CLI." +#~ msgstr "" + +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "Simulation de moniteur" #~ msgid "" -#~ "`How to run Flower using Docker " -#~ "`_" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." #~ msgstr "" -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgid "Quickstart MXNet" +#~ msgstr "Démarrage rapide de MXNet" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." #~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un modèle :code:`Sequential` " +#~ "sur MNIST à l'aide de Flower et" +#~ " de MXNet." -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." 
#~ msgstr "" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, lançons une formation " +#~ "distribuée simple avec deux clients et" +#~ " un serveur. Notre procédure de " +#~ "formation et l'architecture du réseau " +#~ "sont basées sur le tutoriel de " +#~ "reconnaissance de chiffres écrits à la" +#~ " main du MXNet " +#~ "`_." -#~ msgid "Flower driver SDK." -#~ msgstr "Serveur de Flower" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "au MXNet :" -#~ msgid "driver" -#~ msgstr "serveur" +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." #~ msgstr "" +#~ "Nous utilisons MXNet pour charger MNIST," +#~ " un ensemble de données de " +#~ "classification d'images populaire de chiffres" +#~ " manuscrits pour l'apprentissage automatique. " +#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " +#~ "télécharge les données d'entraînement et " +#~ "de test." #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." #~ msgstr "" +#~ "Définis l'entraînement et la perte avec" +#~ " MXNet. Nous entraînons le modèle en" +#~ " parcourant en boucle l'ensemble des " +#~ "données, nous mesurons la perte " +#~ "correspondante et nous l'optimisons." #~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." #~ msgstr "" +#~ "Ensuite, nous définissons la validation " +#~ "de notre modèle d'apprentissage automatique." +#~ " Nous effectuons une boucle sur " +#~ "l'ensemble de test et mesurons à " +#~ "la fois la perte et la précision" +#~ " sur l'ensemble de test." -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." #~ msgstr "" +#~ "Après avoir défini la formation et " +#~ "le test d'un modèle d'apprentissage " +#~ "automatique MXNet, nous utilisons ces " +#~ "fonctions pour mettre en œuvre un " +#~ "client Flower." -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" + +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." 
#~ msgstr "" +#~ "Après avoir chargé l'ensemble de données" +#~ " avec :code:`load_data()`, nous effectuons " +#~ "une propagation vers l'avant pour " +#~ "initialiser le modèle et les paramètres" +#~ " du modèle avec :code:`model(init)`. " +#~ "Ensuite, nous implémentons un client " +#~ "Flower." #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "l'implémentation de l'interface :code:`Client` " +#~ "lorsque ta charge de travail utilise " +#~ "MXNet. L'implémentation de :code:`NumPyClient` " +#~ "signifie généralement la définition des " +#~ "méthodes suivantes (:code:`set_parameters` est " +#~ "cependant facultatif) :" + +#~ msgid "They can be implemented in the following way:" +#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" -#~ msgid "Get task results." +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`MNISTClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." #~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. La chaîne " +#~ ":code:`\"0.0.0:8080\"` indique au client à " +#~ "quel serveur se connecter. Dans notre" +#~ " cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"0.0.0:8080\"`. Si nous exécutons une" +#~ " charge de travail véritablement fédérée" +#~ " avec le serveur et les clients " +#~ "s'exécutant sur des machines différentes, " +#~ "tout ce qui doit changer est " +#~ ":code:`server_address` que nous transmettons " +#~ "au client." -#~ msgid "Schedule tasks." +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" #~ msgstr "" +#~ "Le client et le serveur étant " +#~ "prêts, nous pouvons maintenant tout " +#~ "exécuter et voir l'apprentissage fédéré " +#~ "en action. 
Les systèmes d'apprentissage " +#~ "fédéré ont généralement un serveur et" +#~ " plusieurs clients. Nous devons donc " +#~ "commencer par démarrer le serveur :" -#~ msgid "GrpcDriver" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." #~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-mxnet`." -#~ msgid ":py:obj:`connect `\\ \\(\\)" -#~ msgstr "" +#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" -#~ msgid "Connect to the Driver API." -#~ msgstr "" +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" -#~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" -#~ msgstr "" +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" -#~ msgid "Request for run ID." -#~ msgstr "Demande pour une nouvelle Flower Baseline" +#~ msgid "Shuffles data and its label" +#~ msgstr "Mélange les données et leur étiquette" -#~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" -#~ msgstr "" +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" -#~ msgid "Disconnect from the Driver API." -#~ msgstr "" +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" #~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" +#~ "Nous chargeons l'ensemble de données " +#~ "MNIST de `OpenML `_," +#~ " un ensemble de données de " +#~ "classification d'images populaires de chiffres" +#~ " manuscrits pour l'apprentissage automatique. " +#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " +#~ "les données d'entraînement et de test." +#~ " L'ensemble d'entraînement est ensuite " +#~ "divisé en 10 partitions avec " +#~ ":code:`utils.partition()`." -#~ msgid "Get client IDs." -#~ msgstr "Moteur client Edge" +#~ msgid "Let's get stated!" +#~ msgstr "Allons-y, déclarons-le !" -#~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" #~ msgstr "" -#~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "running the clients' jobs (i.e. their" -#~ " `fit()` method)." +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" #~ msgstr "" -#~ msgid "" -#~ "Much effort went into a completely " -#~ "restructured Flower docs experience. The " -#~ "documentation on [flower.ai/docs](flower.ai/docs) is" -#~ " now divided into Flower Framework, " -#~ "Flower Baselines, Flower Android SDK, " -#~ "Flower iOS SDK, and code example " -#~ "projects." 
+#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" #~ msgstr "" -#~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" #~ msgstr "" -#~ "Les exemples d'utilisation de Flower " -#~ "étaient auparavant regroupés avec Flower " -#~ "dans un paquet appelé ``flwr_example``. " -#~ "Nous migrons ces exemples vers des " -#~ "projets autonomes pour les rendre plus" -#~ " faciles à utiliser. Tous les " -#~ "nouveaux exemples sont basés dans le " -#~ "répertoire ``examples " -#~ "`_." - -#~ msgid "Quickstart TensorFlow/Keras" -#~ msgstr "Démarrage rapide de TensorFlow/Keras" - -#~ msgid "Legacy Examples (`flwr_example`)" -#~ msgstr "Exemples hérités (`flwr_example`)" -#~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" #~ msgstr "" -#~ "Les exemples d'utilisation dans `flwr_example`" -#~ " sont obsolètes et seront supprimés à" -#~ " l'avenir. De nouveaux exemples sont " -#~ "fournis en tant que projets autonomes" -#~ " dans `examples " -#~ "`_." - -#~ msgid "Extra Dependencies" -#~ msgstr "Dépendances supplémentaires" -#~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ msgid "|7f0ee162da38450788493a21627306f7|" #~ msgstr "" -#~ "Le noyau du framework Flower conserve" -#~ " un ensemble minimal de dépendances. " -#~ "Les exemples démontrent Flower dans le" -#~ " contexte de différents frameworks " -#~ "d'apprentissage automatique, de sorte que " -#~ "des dépendances supplémentaires doivent être" -#~ " installées avant qu'un exemple puisse " -#~ "être exécuté." - -#~ msgid "For PyTorch examples::" -#~ msgstr "Pour les exemples de PyTorch: :" - -#~ msgid "For TensorFlow examples::" -#~ msgstr "Pour les exemples de TensorFlow : :" -#~ msgid "For both PyTorch and TensorFlow examples::" -#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" - -#~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" #~ msgstr "" -#~ "Tu peux consulter :code:`pyproject.toml` pour" -#~ " une liste complète des extras " -#~ "possibles (section :code:`[tool.poetry.extras]`)." - -#~ msgid "PyTorch Examples" -#~ msgstr "Exemples de PyTorch" -#~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" #~ msgstr "" -#~ "Nos exemples PyTorch sont basés sur " -#~ "PyTorch 1.7. Ils devraient fonctionner " -#~ "avec d'autres versions également. Jusqu'à " -#~ "présent, nous fournissons les exemples " -#~ "suivants." - -#~ msgid "CIFAR-10 Image Classification" -#~ msgstr "Classification d'images CIFAR-10" -#~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. 
The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" #~ msgstr "" -#~ "`CIFAR-10 et CIFAR-100 " -#~ "`_ sont des" -#~ " ensembles de données d'images RVB " -#~ "populaires. L'exemple Flower CIFAR-10 utilise" -#~ " PyTorch pour former un classificateur " -#~ "CNN simple dans une configuration " -#~ "d'apprentissage fédéré avec deux clients." -#~ msgid "First, start a Flower server:" -#~ msgstr "Tout d'abord, démarre un serveur Flower :" +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" #~ msgstr "" -#~ "Ensuite, démarre les deux clients dans" -#~ " une nouvelle fenêtre de terminal :" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgid "" +#~ "Flower provides pre-made docker images" +#~ " on `Docker Hub `_" +#~ " that include all necessary dependencies" +#~ " for running the SuperLink. You can" +#~ " also build your own custom docker" +#~ " images from scratch with a different" +#~ " version of Python or Ubuntu if " +#~ "that is what you need. In this " +#~ "guide, we will explain what images " +#~ "exist and how to build them " +#~ "locally." +#~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" -#~ msgstr "ImageNet-2012 Classification des images" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ "Both, base and SuperLink image are " +#~ "configured via build arguments. Through " +#~ "build arguments, we can make our " +#~ "build more flexible. For example, in " +#~ "the base image, we can specify the" +#~ " version of Python to install using" +#~ " the ``PYTHON_VERSION`` build argument. " +#~ "Some of the build arguments have " +#~ "default values, others must be specified" +#~ " when building the image. All " +#~ "available build arguments for each image" +#~ " are listed in one of the " +#~ "tables below." #~ msgstr "" -#~ "`ImageNet-2012 `_ est " -#~ "l'un des principaux ensembles de données" -#~ " de vision par ordinateur. L'exemple " -#~ "Flower ImageNet utilise PyTorch pour " -#~ "entraîner un classificateur ResNet-18 dans " -#~ "une configuration d'apprentissage fédéré avec" -#~ " dix clients." 
-#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "``3.11``" +#~ msgstr "1.0.0rc1" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "``UBUNTU_VERSION``" +#~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgid "Version of the official Ubuntu Docker image." #~ msgstr "" -#~ "Pour plus de détails, voir " -#~ ":code:`src/py/flwr_example/pytorch_imagenet`." -#~ msgid "TensorFlow Examples" -#~ msgstr "Exemples de TensorFlow" +#~ msgid "Defaults to ``22.04``." +#~ msgstr "" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." +#~ "The following example creates a base " +#~ "image with Python 3.11.0, pip 23.0.1 " +#~ "and setuptools 69.0.2:" #~ msgstr "" -#~ "Nos exemples TensorFlow sont basés sur" -#~ " TensorFlow 2.0 ou une version plus" -#~ " récente. Jusqu'à présent, nous te " -#~ "proposons les exemples suivants." -#~ msgid "Fashion-MNIST Image Classification" -#~ msgstr "Classification d'images Fashion-MNIST" +#~ msgid "Building the SuperLink image" +#~ msgstr "Démarrer le serveur" -#~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ msgid "Defaults to ``flwr/base``." #~ msgstr "" -#~ "nous suivons cette tradition et " -#~ "fournissons un exemple qui échantillonne " -#~ "des ensembles de données locales " -#~ "aléatoires de Fashion-MNIST et entraîne" -#~ " un modèle simple de classification " -#~ "d'images sur ces partitions." -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "The Python version of the base image." +#~ msgstr "Évaluer la réponse d'un client." -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "" -#~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgid "Defaults to ``ubuntu22.04``." #~ msgstr "" -#~ "Pour plus de détails, voir " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." +#~ msgid "The PyPI package to install." #~ msgstr "" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "Flux de travail" + #~ msgid "" -#~ "Now that you have known how " -#~ "federated XGBoost work with Flower, it's" -#~ " time to run some more comprehensive" -#~ " experiments by customising the " -#~ "experimental settings. In the xgboost-" -#~ "comprehensive example (`full code " -#~ "`_), we provide more options " -#~ "to define various experimental setups, " -#~ "including aggregation strategies, data " -#~ "partitioning and centralised/distributed evaluation." 
-#~ " We also support `Flower simulation " -#~ "`_ making it easy to " -#~ "simulate large client cohorts in a " -#~ "resource-aware manner. Let's take a " -#~ "look!" +#~ "The following example creates a " +#~ "SuperLink image with the official Flower" +#~ " base image py3.11-ubuntu22.04 and Flower" +#~ " 1.8.0:" #~ msgstr "" -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." #~ msgstr "" -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgid "" +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY``, " +#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " +#~ "arguments." #~ msgstr "" -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" -#~ msgstr "" +#~ msgid "Creating New Messages" +#~ msgstr "Création de nouveaux messages" -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgid "" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." #~ msgstr "" +#~ "Voici un guide simple pour créer " +#~ "un nouveau type de message entre " +#~ "le serveur et les clients dans " +#~ "Flower." -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgid "" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." #~ msgstr "" +#~ "Supposons que nous ayons les fonctions" +#~ " suivantes dans :code:`server.py` et " +#~ ":code:`numpy_client.py`..." -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgid "Server's side:" +#~ msgstr "Côté serveur :" + +#~ msgid "Client's side:" +#~ msgstr "Côté client :" + +#~ msgid "" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" #~ msgstr "" +#~ "Voyons maintenant ce que nous devons " +#~ "mettre en œuvre pour que cette " +#~ "simple fonction entre le serveur et " +#~ "le client fonctionne !" -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "Types de messages pour les tampons de protocole" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." #~ msgstr "" +#~ "La première chose à faire est de" +#~ " définir un type de message pour " +#~ "le système RPC dans :code:`transport.proto`." +#~ " Notez que nous devons le faire " +#~ "à la fois pour les messages de " +#~ "demande et de réponse. Pour plus " +#~ "de détails sur la syntaxe de " +#~ "proto3, veuillez consulter la `documentation" +#~ " officielle `_." + +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "Dans le bloc :code:`ServerMessage` :" + +#~ msgid "Within the ClientMessage block:" +#~ msgstr "Dans le bloc ClientMessage :" -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgid "" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." 
#~ msgstr "" +#~ "Veille à ajouter également un champ " +#~ "du type de message nouvellement créé " +#~ "dans :code:`oneof msg`." -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" -#~ msgstr "" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" -#~ msgstr "" +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" -#~ msgstr "" +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "Fonctions de sérialisation et de désérialisation" -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgid "" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." #~ msgstr "" +#~ "La prochaine étape consiste à ajouter" +#~ " des fonctions pour sérialiser et " +#~ "désérialiser les types de données Python" +#~ " vers ou à partir des types de" +#~ " messages RPC définis. Tu dois " +#~ "ajouter ces fonctions dans :code:`serde.py`." -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" -#~ msgstr "" +#~ msgid "The four functions:" +#~ msgstr "Les quatre fonctions :" -#~ msgid "|08cb60859b07461588fe44e55810b050|" -#~ msgstr "" +#~ msgid "Sending the Message from the Server" +#~ msgstr "Envoi du message à partir du serveur" #~ msgid "" -#~ "Flower provides pre-made docker images" -#~ " on `Docker Hub " -#~ "`_ that include" -#~ " all necessary dependencies for running " -#~ "the server. You can also build " -#~ "your own custom docker images from " -#~ "scratch with a different version of " -#~ "Python or Ubuntu if that is what" -#~ " you need. In this guide, we " -#~ "will explain what images exist and " -#~ "how to build them locally." +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" #~ msgstr "" +#~ "Écris maintenant la fonction de demande" +#~ " dans ta classe Client Proxy (par " +#~ "exemple, :code:`grpc_client_proxy.py`) en utilisant" +#~ " les fonctions serde que tu viens " +#~ "de créer :" -#~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " base image and a server image. " -#~ "There will also be a client image" -#~ " soon. The base image, as the " -#~ "name suggests, contains basic dependencies " -#~ "that both the server and the " -#~ "client need. This includes system " -#~ "dependencies, Python and Python tools. " -#~ "The server image is based on the" -#~ " base image, but it additionally " -#~ "installs the Flower server using " -#~ "``pip``." -#~ msgstr "" +#~ msgid "Receiving the Message by the Client" +#~ msgstr "Réception du message par le client" #~ msgid "" -#~ "Both, base and server image are " -#~ "configured via build arguments. Through " -#~ "build arguments, we can make our " -#~ "build more flexible. For example, in " -#~ "the base image, we can specify the" -#~ " version of Python to install using" -#~ " the ``PYTHON_VERSION`` build argument. " -#~ "Some of the build arguments have " -#~ "default values, others must be specified" -#~ " when building the image. All " -#~ "available build arguments for each image" -#~ " are listed in one of the " -#~ "tables below." -#~ msgstr "" - -#~ msgid "Defaults to ``flwr/server``." 
+#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" #~ msgstr "" +#~ "Dernière étape ! Modifie le code " +#~ "dans :code:`message_handler.py` pour vérifier " +#~ "le champ de ton message et appeler" +#~ " la fonction :code:`example_response`. N'oublie" +#~ " pas d'utiliser les fonctions serde !" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "" +#~ msgid "Within the handle function:" +#~ msgstr "Dans le cadre de la fonction de poignée :" -#~ msgid "The image tag of the base image." -#~ msgstr "" +#~ msgid "And add a new function:" +#~ msgstr "Et ajoute une nouvelle fonction :" -#~ msgid "Defaults to ``py3.11-ubuntu22.04``." +#~ msgid "Hopefully, when you run your program you will get the intended result!" #~ msgstr "" +#~ "Avec un peu de chance, lorsque tu" +#~ " exécuteras ton programme, tu obtiendras" +#~ " le résultat escompté !" #~ msgid "" -#~ "The following example creates a server" -#~ " image with the official Flower base" -#~ " image py3.11-ubuntu22.04 and Flower 1.7.0:" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." #~ msgstr "" #~ msgid "" -#~ "The name of image is ``flwr_server`` " -#~ "and the tag ``0.1.0``. Remember that " -#~ "the build arguments as well as the" -#~ " name and tag can be adapted to" -#~ " your needs. These values serve as" -#~ " examples only." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" #~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY`` " -#~ "and ``BASE_IMAGE_TAG`` build arguments. The" -#~ " value of ``BASE_REPOSITORY`` must match" -#~ " the name of your image and the" -#~ " value of ``BASE_IMAGE_TAG`` must match " -#~ "the tag of your image." +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." #~ msgstr "" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. 
Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." #~ msgstr "" -#~ msgid "Open a PR (as shown above)" -#~ msgstr "Ouvre un RP (comme indiqué ci-dessus)" - #~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" -#~ msgstr "Add CI job to deploy the staging system when the `main` branch changes" - -#~ msgid "Changelog entry" -#~ msgstr "Changelog" +#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." #~ msgstr "" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." #~ msgstr "" #~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." #~ msgstr "" -#~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" + +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" #~ msgstr "" -#~ msgid " is to not add the PR to the changelog" -#~ msgstr "" +#~ msgid "Run Flower server (Fleet API)." 
+#~ msgstr "flower-fleet-api" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" #~ msgstr "" -#~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" #~ msgstr "" +#~ msgid "Edge Client Engine" +#~ msgstr "Moteur client Edge" + #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" #~ msgstr "" +#~ "`Flower `_ architecture de " +#~ "base avec Edge Client Engine" -#~ msgid "Example: MXNet - Run MXNet Federated" -#~ msgstr "Exemple : MXNet - Exécuter MXNet Federated" +#~ msgid "Virtual Client Engine" +#~ msgstr "Moteur de client virtuel" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. 
First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" #~ msgstr "" -#~ "Ce tutoriel te montrera comment utiliser" -#~ " Flower pour construire une version " -#~ "fédérée d'une charge de travail MXNet" -#~ " existante. Nous utilisons MXNet pour " -#~ "former un modèle séquentiel sur " -#~ "l'ensemble de données MNIST. Nous " -#~ "structurerons l'exemple de la même " -#~ "manière que notre présentation `PyTorch " -#~ "- De la centralisation à la " -#~ "fédération `_. " -#~ "MXNet et PyTorch sont très similaires" -#~ " et une très bonne comparaison entre" -#~ " MXNet et PyTorch est donnée ici " -#~ "`_. Tout " -#~ "d'abord, nous construisons une approche " -#~ "de formation centralisée basée sur le" -#~ " tutoriel `Handandwritten Digit Recognition " -#~ "`_." -#~ " Ensuite, nous nous basons sur le " -#~ "code de formation centralisé pour " -#~ "exécuter la formation de manière " -#~ "fédérée." +#~ "`Flower `_ architecture de " +#~ "base avec moteur de client virtuel" -#~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" #~ msgstr "" -#~ "Avant de commencer à configurer notre" -#~ " exemple MXNet, nous installons les " -#~ "paquets :code:`mxnet` et :code:`flwr` :" - -#~ msgid "MNIST Training with MXNet" -#~ msgstr "Formation MNIST avec MXNet" +#~ "Moteur client virtuel et moteur client" +#~ " Edge dans la même charge de " +#~ "travail" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" #~ msgstr "" -#~ "Nous commençons par une brève " -#~ "description du code d'entraînement centralisé" -#~ " basé sur un modèle :code:`Sequential`. " -#~ "Si tu veux une explication plus " -#~ "approfondie de ce qui se passe, " -#~ "jette un coup d'œil au tutoriel " -#~ "officiel `MXNet " -#~ "`_." +#~ "`Flower `_ architecture de " +#~ "base avec un moteur de client " +#~ "virtuel et un moteur de client " +#~ "périphérique" -#~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ msgid "How to build Docker Flower images locally" #~ msgstr "" -#~ "Créons un nouveau fichier appelé " -#~ ":code:`mxnet_mnist.py` avec tous les " -#~ "composants requis pour un apprentissage " -#~ "MNIST traditionnel (centralisé). Tout d'abord," -#~ " le package MXNet :code:`mxnet` doit " -#~ "être importé. Tu peux voir que " -#~ "nous n'avons pas encore importé le " -#~ "package :code:`flwr` pour l'apprentissage " -#~ "fédéré. Cela sera fait plus tard." 
-#~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." -#~ msgstr "" -#~ "La fonction :code:`load_data()` charge les " -#~ "ensembles d'entraînement et de test " -#~ "MNIST." +#~ msgid "Clone the flower repository." +#~ msgstr "**Fourche le dépôt de Flower**" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." #~ msgstr "" -#~ "Comme nous l'avons déjà mentionné, nous" -#~ " utiliserons l'ensemble de données MNIST" -#~ " pour cette charge de travail " -#~ "d'apprentissage automatique. L'architecture du " -#~ "modèle (un modèle :code:`Séquentiel` très " -#~ "simple) est définie dans :code:`model()`." -#~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." -#~ msgstr "" -#~ "Nous devons maintenant définir la " -#~ "formation (fonction :code:`train()`) qui passe" -#~ " en boucle sur l'ensemble de la " -#~ "formation et mesure la perte pour " -#~ "chaque lot d'exemples de formation." +#~ msgid "``22.04``" +#~ msgstr "1.0.0rc1" -#~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." -#~ msgstr "" -#~ "L'évaluation du modèle est définie dans" -#~ " la fonction :code:`test()`. Cette fonction" -#~ " passe en boucle sur tous les " -#~ "échantillons de test et mesure la " -#~ "perte et la précision du modèle en" -#~ " fonction de l'ensemble des données " -#~ "de test." +#~ msgid "``23.0.1``" +#~ msgstr "1.0.0rc1" -#~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." -#~ msgstr "" -#~ "Après avoir défini le chargement des " -#~ "données, l'architecture du modèle, " -#~ "l'entraînement et l'évaluation, nous pouvons" -#~ " tout assembler et entraîner notre " -#~ "modèle sur MNIST. Note que le " -#~ "dispositif GPU/CPU pour l'entraînement et " -#~ "le test est défini dans le " -#~ ":code:`ctx` (contexte)." +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" -#~ msgstr "" -#~ "Tu peux maintenant exécuter ta charge" -#~ " de travail (centralisée) d'apprentissage " -#~ "automatique MXNet :" +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." -#~ msgstr "" -#~ "Jusqu'à présent, tout cela devrait te" -#~ " sembler assez familier si tu as " -#~ "déjà utilisé MXNet (ou même PyTorch)." 
-#~ " Passons à l'étape suivante et " -#~ "utilisons ce que nous avons construit" -#~ " pour créer un simple système " -#~ "d'apprentissage fédéré composé d'un serveur" -#~ " et de deux clients." - -#~ msgid "MXNet meets Flower" -#~ msgstr "MXNet rencontre Flower" +#~ "The following example creates a base " +#~ "Ubuntu/Alpine image with Python 3.11.0, " +#~ "pip 23.0.1, setuptools 69.0.2 and Flower" +#~ " 1.8.0:" +#~ msgstr "" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ "The name of image is ``flwr_base`` " +#~ "and the tag ``0.1.0``. Remember that " +#~ "the build arguments as well as the" +#~ " name and tag can be adapted to" +#~ " your needs. These values serve as" +#~ " examples only." #~ msgstr "" -#~ "Jusqu'à présent, il n'était pas facile" -#~ " d'utiliser les charges de travail " -#~ "MXNet pour l'apprentissage fédéré car " -#~ "l'apprentissage fédéré n'est pas pris en" -#~ " charge dans MXNet. Comme Flower est" -#~ " totalement agnostique vis-à-vis du cadre" -#~ " d'apprentissage automatique sous-jacent, " -#~ "il peut être utilisé pour fédérer " -#~ "des charges de travail d'apprentissage " -#~ "automatique arbitraires. Cette section te " -#~ "montrera comment Flower peut être " -#~ "utilisé pour fédérer notre charge de " -#~ "travail MXNet centralisée." -#~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" #~ msgstr "" -#~ "Le concept pour fédérer une charge " -#~ "de travail existante est toujours le " -#~ "même et facile à comprendre. Nous " -#~ "devons démarrer un *serveur* et ensuite" -#~ " utiliser le code dans " -#~ ":code:`mxnet_mnist.py` pour les *clients* qui" -#~ " sont connectés au *serveur*. Le " -#~ "*serveur* envoie les paramètres du " -#~ "modèle aux clients. Les *clients* " -#~ "exécutent la formation et mettent à " -#~ "jour les paramètres. Les paramètres mis" -#~ " à jour sont renvoyés au *serveur*" -#~ " qui fait la moyenne de toutes " -#~ "les mises à jour de paramètres " -#~ "reçues. Ceci décrit un tour du " -#~ "processus d'apprentissage fédéré et nous " -#~ "répétons cette opération pour plusieurs " -#~ "tours." #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. 
Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" #~ msgstr "" -#~ "Enfin, nous allons définir la logique" -#~ " de notre *client* dans :code:`client.py`" -#~ " et nous appuyer sur l'entraînement " -#~ "MXNet défini précédemment dans " -#~ ":code:`mxnet_mnist.py`. Notre *client* doit " -#~ "importer :code:`flwr`, mais aussi " -#~ ":code:`mxnet` pour mettre à jour les " -#~ "paramètres de notre modèle MXNet :" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY`` " +#~ "build argument." #~ msgstr "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" -#~ msgstr "transforme les :code:`NDArray` du MXNet en :code:`ndarray` de NumPy" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Démarrer le serveur" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." #~ msgstr "" -#~ "La partie la plus difficile est de" -#~ " transformer les paramètres MXNet de " -#~ ":code:`NDArray` en :code:`NumPy Arrays` pour" -#~ " les rendre lisibles pour Flower." 
+ +#~ msgid "**Via the UI**" +#~ msgstr "**Review the PR**" #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." #~ msgstr "" -#~ "Les deux méthodes :code:`NumPyClient` " -#~ ":code:`fit` et :code:`evaluate` utilisent les" -#~ " fonctions :code:`train()` et :code:`test()` " -#~ "définies précédemment dans :code:`mxnet_mnist.py`." -#~ " Ce que nous faisons vraiment ici," -#~ " c'est que nous indiquons à Flower," -#~ " par le biais de notre sous-" -#~ "classe :code:`NumPyClient`, laquelle de nos" -#~ " fonctions déjà définies doit être " -#~ "appelée pour l'entraînement et l'évaluation." -#~ " Nous avons inclus des annotations de" -#~ " type pour te donner une meilleure" -#~ " compréhension des types de données " -#~ "qui sont transmis." #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" + +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "" + +#~ msgid "**Via the GitHub CI**" #~ msgstr "" -#~ "Après avoir défini le chargement des " -#~ "données, l'architecture du modèle, la " -#~ "formation et l'évaluation, nous pouvons " -#~ "tout rassembler et former notre modèle" -#~ " :code:`Sequential` sur MNIST." #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." #~ msgstr "" -#~ "dans chaque fenêtre (assure-toi que " -#~ "le serveur est toujours en cours " -#~ "d'exécution avant de le faire) et " -#~ "tu verras ton projet MXNet exécuter " -#~ "l'apprentissage fédéré sur deux clients. " -#~ "Félicitations !" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." #~ msgstr "" -#~ "Le code source complet de cet " -#~ "exemple : `MXNet : From Centralized " -#~ "To Federated (Code) " -#~ "`_. Notre " -#~ "exemple est bien sûr un peu trop" -#~ " simplifié parce que les deux clients" -#~ " chargent exactement le même ensemble " -#~ "de données, ce qui n'est pas " -#~ "réaliste. Tu es maintenant prêt à " -#~ "explorer ce sujet plus en profondeur." 
-#~ " Pourquoi ne pas utiliser un CNN " -#~ "ou un ensemble de données différent " -#~ "? Pourquoi ne pas ajouter d'autres " -#~ "clients ?" -#~ msgid "with the following command sequence:" -#~ msgstr "avec la séquence de commandes suivante :" +#~ msgid "Preliminarities" +#~ msgstr "" + +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "Exemple : JAX - Exécuter JAX Federated" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgstr "" + +#~ msgid ":doc:`How to run Flower using Docker `" #~ msgstr "" -#~ "Si tu es un chercheur, tu peux " -#~ "très bien utiliser les certificats " -#~ "auto-signés générés à l'aide des " -#~ "scripts qui font partie de ce " -#~ "guide." #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" + +#~ msgid "Before you start, make sure that the Docker daemon is running:" #~ msgstr "" -#~ "Nous allons maintenant montrer comment " -#~ "écrire un serveur qui utilise les " -#~ "scripts générés précédemment." #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" + +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." #~ msgstr "" -#~ "Lorsqu'il fournit des certificats, le " -#~ "serveur attend un tuple de trois " -#~ "certificats. :code:`Path` peut être utilisé" -#~ " pour lire facilement le contenu de" -#~ " ces fichiers en chaînes d'octets, ce" -#~ " qui est le type de données " -#~ "attendu par :code:`start_server`." #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." #~ msgstr "" -#~ msgid "Flower server" -#~ msgstr "Serveur de Flower" +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" + +#~ msgid "Quickstart" +#~ msgstr "Démarrage rapide de JAX" + +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "" #~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." 
-#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " #~ "The ``--rm`` flag tells Docker to " #~ "remove the container after it exits." #~ msgstr "" #~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " #~ "Docker flag ``--rm``, the state is " #~ "not persisted between container starts. " #~ "We will show below how to save " @@ -31423,7 +35864,7 @@ msgstr "" #~ "on ``http://localhost:9091`` and the Fleet " #~ "API on ``http://localhost:9092``. Lastly, any" #~ " flag that comes after the tag " -#~ "is passed to the Flower server. " +#~ "is passed to the Flower SuperLink. " #~ "Here, we are passing the flag " #~ "``--insecure``." #~ msgstr "" @@ -31435,252 +35876,742 @@ msgstr "" #~ "testing purposes. We strongly recommend " #~ "enabling `SSL `_ when " +#~ "ssl-for-secure-connections>`__ when " #~ "deploying to a production environment." #~ msgstr "" #~ msgid "" #~ "You can use ``--help`` to view all" -#~ " available flags that the server " +#~ " available flags that the SuperLink " #~ "supports:" #~ msgstr "" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "" + #~ msgid "" #~ "If you want to persist the state" -#~ " of the server on your host " +#~ " of the SuperLink on your host " #~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " +#~ "specify a directory where you want " +#~ "to save the file on your host " #~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." #~ msgstr "" #~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " #~ "state from the file. 
To start the" -#~ " server with an empty database, " +#~ " SuperLink with an empty database, " #~ "simply remove the ``state.db`` file." #~ msgstr "" #~ msgid "" #~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." -#~ msgstr "" - -#~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. " -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." #~ msgstr "" #~ msgid "" #~ "Assuming all files we need are in" #~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " +#~ " can use the flag ``--volume`` to " #~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." +#~ msgstr "" + +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" + +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" + +#~ msgid "Creating a SuperNode Dockerfile" +#~ msgstr "" + +#~ msgid "Let's assume the following project layout:" +#~ msgstr "" + +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." 
+#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" + +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "" + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." +#~ msgstr "" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "" + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "" + +#~ msgid "``docker run``: This is the command to run a new Docker container." +#~ msgstr "" + +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "" + +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" + +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "" + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" + +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "" + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" + +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." 
+#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." +#~ msgstr "" + +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" + +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" + +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "" + +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "" + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" + +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." +#~ msgstr "" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" + +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "" + +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" + +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. 
To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" + +#~ msgid "Run with root user privileges" +#~ msgstr "" + +#~ msgid "" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." +#~ msgstr "" + +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" + +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" + +#~ msgid "Using a different Flower version" +#~ msgstr "" + +#~ msgid "Pinning a Docker image to a specific version" +#~ msgstr "" + +#~ msgid "" +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." +#~ msgstr "" + +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" +#~ msgstr "" + +#~ msgid "" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.client `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.common `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.simulation `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr "serveur.stratégie.Stratégie" + +#~ msgid ":py:obj:`Context `\\ \\(state\\)" +#~ msgstr "" + +#~ msgid "State of your run." +#~ msgstr "" + +#~ msgid "Metrics record." +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" +#~ msgstr "" + +#~ msgid "Remove all items from R." 
#~ msgstr "" -#~ msgid "Using a different Flower or Python version" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#~ msgstr "" + +#~ msgid "d defaults to None." +#~ msgstr "" + +#~ msgid "Update R from dict/iterable E and F." #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgstr "" -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr "" -#~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." #~ msgstr "" -#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgid ":py:obj:`partition_id `\\" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgid "An identifier telling which data partition a ClientApp should use." #~ msgstr "" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "Tout d'abord, démarre un serveur Flower :" - #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" #~ msgstr "" -#~ "Flower 1.0 : ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" -#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. 
Defaults to " -#~ "`\"[::]:9091\"`." +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. " +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." #~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" #~ msgstr "" -#~ msgid "start\\_driver" -#~ msgstr "start_client" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "flower-fleet-api" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" + +#~ msgid "run\\_fleet\\_api" #~ msgstr "" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ msgid "The Driver object to use." +#~ msgid "key shares." #~ msgstr "" -#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "Simulation de moniteur" - #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. 
Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" -#~ msgid "Quickstart MXNet" -#~ msgstr "Démarrage rapide de MXNet" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à entraîner un réseau neuronal " +#~ "convolutif sur CIFAR10 à l'aide de " +#~ "Flower et PyTorch." #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." #~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour de poids " +#~ "individuelles pour le modèle en fonction" +#~ " de leurs ensembles de données " +#~ "locales. Ces mises à jour sont " +#~ "ensuite envoyées au *serveur* qui les" +#~ " agrège pour produire un meilleur " +#~ "modèle. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour de poids s'appelle un" +#~ " *round*." #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à former un modèle :code:`Sequential` " -#~ "sur MNIST à l'aide de Flower et" -#~ " de MXNet." +#~ "Maintenant que nous avons une idée " +#~ "générale de ce qui se passe, " +#~ "commençons. Nous devons d'abord installer " +#~ "Flower. Tu peux le faire en " +#~ "exécutant :" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, allons-y et installons " +#~ "PyTorch et la bibliothèque **torchvision** " +#~ ":" #~ msgid "" #~ "Now that we have all our " @@ -31688,697 +36619,915 @@ msgstr "" #~ "simple distributed training with two " #~ "clients and one server. 
Our training " #~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." #~ msgstr "" -#~ "Maintenant que toutes nos dépendances " -#~ "sont installées, lançons une formation " -#~ "distribuée simple avec deux clients et" -#~ " un serveur. Notre procédure de " -#~ "formation et l'architecture du réseau " -#~ "sont basées sur le tutoriel de " -#~ "reconnaissance de chiffres écrits à la" -#~ " main du MXNet " -#~ "`_." +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons une " +#~ "formation distribuée simple avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " de formation et l'architecture de " +#~ "notre réseau sont basées sur `Deep " +#~ "Learning with PyTorch " +#~ "`_" +#~ " de PyTorch." #~ msgid "" #~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " +#~ "import Flower and PyTorch related " #~ "packages:" #~ msgstr "" #~ "Dans un fichier appelé :code:`client.py`, " #~ "importe Flower et les paquets liés " -#~ "au MXNet :" +#~ "à PyTorch :" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" - -#~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." -#~ msgstr "" -#~ "Nous utilisons MXNet pour charger MNIST," -#~ " un ensemble de données de " -#~ "classification d'images populaire de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " -#~ "télécharge les données d'entraînement et " -#~ "de test." - -#~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." +#~ msgid "In addition, we define the device allocation in PyTorch with:" #~ msgstr "" -#~ "Définis l'entraînement et la perte avec" -#~ " MXNet. Nous entraînons le modèle en" -#~ " parcourant en boucle l'ensemble des " -#~ "données, nous mesurons la perte " -#~ "correspondante et nous l'optimisons." +#~ "En outre, nous définissons l'attribution " +#~ "des appareils dans PyTorch avec :" #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" -#~ "Ensuite, nous définissons la validation " -#~ "de notre modèle d'apprentissage automatique." -#~ " Nous effectuons une boucle sur " -#~ "l'ensemble de test et mesurons à " -#~ "la fois la perte et la précision" -#~ " sur l'ensemble de test." +#~ "Nous utilisons PyTorch pour charger " +#~ "CIFAR10, un ensemble de données de " +#~ "classification d'images colorées populaire " +#~ "pour l'apprentissage automatique. Le " +#~ ":code:`DataLoader()` de PyTorch télécharge les" +#~ " données d'entraînement et de test " +#~ "qui sont ensuite normalisées." 
#~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." #~ msgstr "" -#~ "Après avoir défini la formation et " -#~ "le test d'un modèle d'apprentissage " -#~ "automatique MXNet, nous utilisons ces " -#~ "fonctions pour mettre en œuvre un " -#~ "client Flower." - -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" +#~ "Définis la perte et l'optimiseur avec" +#~ " PyTorch L'entraînement de l'ensemble de" +#~ " données se fait en bouclant sur " +#~ "l'ensemble de données, en mesurant la" +#~ " perte correspondante et en l'optimisant." #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." #~ msgstr "" -#~ "Après avoir chargé l'ensemble de données" -#~ " avec :code:`load_data()`, nous effectuons " -#~ "une propagation vers l'avant pour " -#~ "initialiser le modèle et les paramètres" -#~ " du modèle avec :code:`model(init)`. " -#~ "Ensuite, nous implémentons un client " -#~ "Flower." +#~ "Définis ensuite la validation du réseau" +#~ " d'apprentissage automatique. Nous passons " +#~ "en boucle sur l'ensemble de test " +#~ "et mesurons la perte et la " +#~ "précision de l'ensemble de test." #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." #~ msgstr "" -#~ "Flower fournit une classe de commodité" -#~ " appelée :code:`NumPyClient` qui facilite " -#~ "l'implémentation de l'interface :code:`Client` " -#~ "lorsque ta charge de travail utilise " -#~ "MXNet. L'implémentation de :code:`NumPyClient` " -#~ "signifie généralement la définition des " -#~ "méthodes suivantes (:code:`set_parameters` est " -#~ "cependant facultatif) :" - -#~ msgid "They can be implemented in the following way:" -#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" +#~ "Après avoir défini l'entraînement et le" +#~ " test d'un modèle d'apprentissage " +#~ "automatique PyTorch, nous utilisons les " +#~ "fonctions pour les clients Flower." 
#~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" #~ msgstr "" -#~ "Nous pouvons maintenant créer une " -#~ "instance de notre classe :code:`MNISTClient`" -#~ " et ajouter une ligne pour exécuter" -#~ " ce client :" +#~ "Les clients de Flower utiliseront un " +#~ "CNN simple adapté de \"PyTorch : A" +#~ " 60 Minute Blitz\" :" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." #~ msgstr "" -#~ "C'est tout pour le client. Il nous" -#~ " suffit d'implémenter :code:`Client` ou " -#~ ":code:`NumPyClient` et d'appeler " -#~ ":code:`fl.client.start_client()`. La chaîne " -#~ ":code:`\"0.0.0:8080\"` indique au client à " -#~ "quel serveur se connecter. Dans notre" -#~ " cas, nous pouvons exécuter le " -#~ "serveur et le client sur la même" -#~ " machine, c'est pourquoi nous utilisons " -#~ ":code:`\"0.0.0:8080\"`. Si nous exécutons une" -#~ " charge de travail véritablement fédérée" -#~ " avec le serveur et les clients " -#~ "s'exécutant sur des machines différentes, " -#~ "tout ce qui doit changer est " -#~ ":code:`server_address` que nous transmettons " -#~ "au client." +#~ "Après avoir chargé l'ensemble des " +#~ "données avec :code:`load_data()`, nous " +#~ "définissons l'interface Flower." #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ "Le client et le serveur étant " -#~ "prêts, nous pouvons maintenant tout " -#~ "exécuter et voir l'apprentissage fédéré " -#~ "en action. Les systèmes d'apprentissage " -#~ "fédéré ont généralement un serveur et" -#~ " plusieurs clients. Nous devons donc " -#~ "commencer par démarrer le serveur :" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise PyTorch. 
Mettre en œuvre" +#~ " :code:`NumPyClient` signifie généralement " +#~ "définir les méthodes suivantes " +#~ "(:code:`set_parameters` est cependant facultatif)" +#~ " :" + +#~ msgid "which can be implemented in the following way:" +#~ msgstr "qui peut être mis en œuvre de la manière suivante :" #~ msgid "" #~ "Congratulations! You've successfully built and" #~ " run your first federated learning " #~ "system. The full `source code " #~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "pytorch/client.py>`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." #~ msgstr "" #~ "Félicitations ! Tu as réussi à " #~ "construire et à faire fonctionner ton" #~ " premier système d'apprentissage fédéré. Le" #~ " code source complet " #~ "`_ de cet exemple se " -#~ "trouve dans :code:`examples/quickstart-mxnet`." +#~ "pytorch/client.py>`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-pytorch`." -#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" +#~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" +#~ msgid "" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." +#~ msgstr "" -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" +#~ msgstr "" -#~ msgid "Shuffles data and its label" -#~ msgstr "Mélange les données et leur étiquette" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." +#~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. 
The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" #~ msgstr "" -#~ "Nous chargeons l'ensemble de données " -#~ "MNIST de `OpenML `_," -#~ " un ensemble de données de " -#~ "classification d'images populaires de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " -#~ "les données d'entraînement et de test." -#~ " L'ensemble d'entraînement est ensuite " -#~ "divisé en 10 partitions avec " -#~ ":code:`utils.partition()`." -#~ msgid "Let's get stated!" -#~ msgstr "Allons-y, déclarons-le !" +#~ msgid "Implementing a Flower client" +#~ msgstr "Mise en place d'un client Flower" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" +#~ "Pour mettre en œuvre le client " +#~ "Flower, nous créons une sous-classe " +#~ "de ``flwr.client.NumPyClient`` et mettons en" +#~ " œuvre les trois méthodes " +#~ "``get_parameters``, ``fit`` et ``evaluate`` :" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" +#~ "La fonction ``start_simulation`` accepte un" +#~ " certain nombre d'arguments, parmi lesquels" +#~ " le ``client_fn`` utilisé pour créer " +#~ "les instances ``FlowerClient``, le nombre " +#~ "de clients à simuler (``num_clients``), " +#~ "le nombre de tours d'apprentissage " +#~ "fédéré (``num_rounds``), et la stratégie. " +#~ "La stratégie encapsule l'approche/algorithme " +#~ "d'apprentissage fédéré, par exemple, " +#~ "*Federated Averaging* (FedAvg)." 
-#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" #~ msgstr "" +#~ "La seule chose qui reste à faire" +#~ " est d'indiquer à la stratégie " +#~ "d'appeler cette fonction chaque fois " +#~ "qu'elle reçoit des dictionnaires de " +#~ "métriques d'évaluation de la part des" +#~ " clients :" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "|3047bbce54b34099ae559963d0420d79|" #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "|032eb6fed6924ac387b9f13854919196|" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" #~ msgstr "" -#~ msgid "" -#~ "Flower provides pre-made docker images" -#~ " on `Docker Hub `_" -#~ " that include all necessary dependencies" -#~ " for running the SuperLink. You can" -#~ " also build your own custom docker" -#~ " images from scratch with a different" -#~ " version of Python or Ubuntu if " -#~ "that is what you need. In this " -#~ "guide, we will explain what images " -#~ "exist and how to build them " -#~ "locally." +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" #~ msgstr "" -#~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ msgid "" -#~ "Both, base and SuperLink image are " -#~ "configured via build arguments. Through " -#~ "build arguments, we can make our " -#~ "build more flexible. For example, in " -#~ "the base image, we can specify the" -#~ " version of Python to install using" -#~ " the ``PYTHON_VERSION`` build argument. " -#~ "Some of the build arguments have " -#~ "default values, others must be specified" -#~ " when building the image. All " -#~ "available build arguments for each image" -#~ " are listed in one of the " -#~ "tables below." +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" -#~ msgid "``3.11``" -#~ msgstr "1.0.0rc1" - -#~ msgid "``UBUNTU_VERSION``" +#~ msgid "run\\_client\\_app" #~ msgstr "" -#~ msgid "Version of the official Ubuntu Docker image." -#~ msgstr "" +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" -#~ msgid "Defaults to ``22.04``." 
+#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" #~ msgid "" -#~ "The following example creates a base " -#~ "image with Python 3.11.0, pip 23.0.1 " -#~ "and setuptools 69.0.2:" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ msgid "Building the SuperLink image" -#~ msgstr "Démarrer le serveur" - -#~ msgid "Defaults to ``flwr/base``." +#~ msgid "the string key as the query for the layout." #~ msgstr "" -#~ msgid "The Python version of the base image." -#~ msgstr "Évaluer la réponse d'un client." +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" -#~ msgid "Defaults to ``py3.11``." +#~ msgid "run\\_server\\_app" #~ msgstr "" -#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" #~ msgstr "" -#~ msgid "The PyPI package to install." +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." #~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "Flux de travail" +#~ msgid "The total number of clients in this simulation." +#~ msgstr "" #~ msgid "" -#~ "The following example creates a " -#~ "SuperLink image with the official Flower" -#~ " base image py3.11-ubuntu22.04 and Flower" -#~ " 1.8.0:" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" #~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY``, " -#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " -#~ "arguments." +#~ "Optionally specify the type of actor " +#~ "to use. 
The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." #~ msgstr "" -#~ msgid "Creating New Messages" -#~ msgstr "Création de nouveaux messages" - #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." #~ msgstr "" -#~ "Voici un guide simple pour créer " -#~ "un nouveau type de message entre " -#~ "le serveur et les clients dans " -#~ "Flower." #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" -#~ "Supposons que nous ayons les fonctions" -#~ " suivantes dans :code:`server.py` et " -#~ ":code:`numpy_client.py`..." -#~ msgid "Server's side:" -#~ msgstr "Côté serveur :" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" -#~ msgid "Client's side:" -#~ msgstr "Côté client :" +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" #~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" -#~ "Voyons maintenant ce que nous devons " -#~ "mettre en œuvre pour que cette " -#~ "simple fonction entre le serveur et " -#~ "le client fonctionne !" - -#~ msgid "Message Types for Protocol Buffers" -#~ msgstr "Types de messages pour les tampons de protocole" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en savoir plus." #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." #~ msgstr "" -#~ "La première chose à faire est de" -#~ " définir un type de message pour " -#~ "le système RPC dans :code:`transport.proto`." -#~ " Notez que nous devons le faire " -#~ "à la fois pour les messages de " -#~ "demande et de réponse. Pour plus " -#~ "de détails sur la syntaxe de " -#~ "proto3, veuillez consulter la `documentation" -#~ " officielle `_." 
-#~ msgid "Within the :code:`ServerMessage` block:" -#~ msgstr "Dans le bloc :code:`ServerMessage` :" +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " à l'aide des transformateurs Hugging " +#~ "Face et de Flower !" -#~ msgid "Within the ClientMessage block:" -#~ msgstr "Dans le bloc ClientMessage :" +#~ msgid "Dependencies" +#~ msgstr "Dépendances" #~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" #~ msgstr "" -#~ "Veille à ajouter également un champ " -#~ "du type de message nouvellement créé " -#~ "dans :code:`oneof msg`." - -#~ msgid "Once that is done, we will compile the file with:" -#~ msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" +#~ "Pour suivre ce tutoriel, tu devras " +#~ "installer les paquets suivants : " +#~ ":code:`datasets`, :code:`evaluate`, :code:`flwr`, " +#~ ":code:`torch`, et :code:`transformers`. Cela " +#~ "peut être fait en utilisant :code:`pip`" +#~ " :" -#~ msgid "If it compiles successfully, you should see the following message:" -#~ msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "Flux de travail standard pour le visage" -#~ msgid "Serialization and Deserialization Functions" -#~ msgstr "Fonctions de sérialisation et de désérialisation" +#~ msgid "Handling the data" +#~ msgstr "Traitement des données" #~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" -#~ "La prochaine étape consiste à ajouter" -#~ " des fonctions pour sérialiser et " -#~ "désérialiser les types de données Python" -#~ " vers ou à partir des types de" -#~ " messages RPC définis. Tu dois " -#~ "ajouter ces fonctions dans :code:`serde.py`." - -#~ msgid "The four functions:" -#~ msgstr "Les quatre fonctions :" +#~ "Pour récupérer le jeu de données " +#~ "IMDB, nous utiliserons la bibliothèque " +#~ ":code:`datasets` de Hugging Face. Nous " +#~ "devons ensuite tokeniser les données et" +#~ " créer des :code:`PyTorch` dataloaders, ce" +#~ " qui est fait dans la fonction " +#~ ":code:`load_data` :" -#~ msgid "Sending the Message from the Server" -#~ msgstr "Envoi du message à partir du serveur" +#~ msgid "Training and testing the model" +#~ msgstr "Former et tester le modèle" #~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. 
This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ "Écris maintenant la fonction de demande" -#~ " dans ta classe Client Proxy (par " -#~ "exemple, :code:`grpc_client_proxy.py`) en utilisant" -#~ " les fonctions serde que tu viens " -#~ "de créer :" +#~ "Une fois que nous avons trouvé un" +#~ " moyen de créer notre trainloader et" +#~ " notre testloader, nous pouvons nous " +#~ "occuper de l'entraînement et du test." +#~ " C'est très similaire à n'importe " +#~ "quelle boucle d'entraînement ou de test" +#~ " :code:`PyTorch` :" -#~ msgid "Receiving the Message by the Client" -#~ msgstr "Réception du message par le client" +#~ msgid "Creating the model itself" +#~ msgstr "Créer le modèle lui-même" #~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" -#~ "Dernière étape ! Modifie le code " -#~ "dans :code:`message_handler.py` pour vérifier " -#~ "le champ de ton message et appeler" -#~ " la fonction :code:`example_response`. N'oublie" -#~ " pas d'utiliser les fonctions serde !" +#~ "Pour créer le modèle lui-même, " +#~ "nous allons simplement charger le modèle" +#~ " distillBERT pré-entraîné en utilisant le" +#~ " :code:`AutoModelForSequenceClassification` de Hugging" +#~ " Face :" -#~ msgid "Within the handle function:" -#~ msgstr "Dans le cadre de la fonction de poignée :" +#~ msgid "Creating the IMDBClient" +#~ msgstr "Création du client IMDBC" -#~ msgid "And add a new function:" -#~ msgstr "Et ajoute une nouvelle fonction :" +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" +#~ "Pour fédérer notre exemple à plusieurs" +#~ " clients, nous devons d'abord écrire " +#~ "notre classe de client Flower (héritant" +#~ " de :code:`flwr.client.NumPyClient`). C'est très" +#~ " facile, car notre modèle est un " +#~ "modèle :code:`PyTorch` standard :" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" +#~ "La fonction :code:`get_parameters` permet au" +#~ " serveur d'obtenir les paramètres du " +#~ "client. Inversement, la fonction " +#~ ":code:`set_parameters` permet au serveur " +#~ "d'envoyer ses paramètres au client. " +#~ "Enfin, la fonction :code:`fit` forme le" +#~ " modèle localement pour le client, et" +#~ " la fonction :code:`evaluate` teste le " +#~ "modèle localement et renvoie les mesures" +#~ " correspondantes." + +#~ msgid "Starting the server" +#~ msgstr "Démarrer le serveur" -#~ msgid "Hopefully, when you run your program you will get the intended result!" 
-#~ msgstr "" -#~ "Avec un peu de chance, lorsque tu" -#~ " exécuteras ton programme, tu obtiendras" -#~ " le résultat escompté !" +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "Maintenant que nous avons un moyen " +#~ "d'instancier les clients, nous devons " +#~ "créer notre serveur afin d'agréger les" +#~ " résultats. Avec Flower, cela peut " +#~ "être fait très facilement en choisissant" +#~ " d'abord une stratégie (ici, nous " +#~ "utilisons :code:`FedAvg`, qui définira les " +#~ "poids globaux comme la moyenne des " +#~ "poids de tous les clients à chaque" +#~ " tour) et en utilisant ensuite la " +#~ "fonction :code:`flwr.server.start_server` :" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" +#~ "La fonction :code:`weighted_average` est là" +#~ " pour fournir un moyen d'agréger les" +#~ " mesures réparties entre les clients " +#~ "(en gros, cela nous permet d'afficher" +#~ " une belle moyenne de précision et" +#~ " de perte pour chaque tour)." + +#~ msgid "Putting everything together" +#~ msgstr "Tout assembler" + +#~ msgid "We can now start client instances using:" +#~ msgstr "" +#~ "Nous pouvons maintenant démarrer des " +#~ "instances de clients en utilisant :" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" +#~ "Et ils pourront se connecter au " +#~ "serveur et démarrer la formation " +#~ "fédérée." #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" +#~ "Si tu veux voir tout ce qui " +#~ "est mis ensemble, tu devrais consulter" +#~ " l'exemple de code complet : " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``--volume``" -#~ " to mount the user's home directory" -#~ " (``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." 
#~ msgstr "" +#~ "Bien sûr, c'est un exemple très " +#~ "basique, et beaucoup de choses peuvent" +#~ " être ajoutées ou modifiées, il " +#~ "s'agissait juste de montrer avec quelle" +#~ " simplicité on pouvait fédérer un " +#~ "flux de travail Hugging Face à " +#~ "l'aide de Flower." #~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the user's home directory on your " -#~ "host system. If the file already " -#~ "exists, the SuperLink tries to restore" -#~ " the state from the file. To " -#~ "start the SuperLink with an empty " -#~ "database, simply remove the ``state.db`` " -#~ "file." +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." #~ msgstr "" +#~ "Notez que dans cet exemple, nous " +#~ "avons utilisé :code:`PyTorch`, mais nous " +#~ "aurions très bien pu utiliser " +#~ ":code:`TensorFlow`." #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the SuperLink to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the SuperLink with the" -#~ " ``--certificates`` flag." +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" #~ msgid "" -#~ "``--server 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant PyTorch Lightning et " +#~ "Flower !" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." #~ msgid "" -#~ "``--server 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." #~ msgstr "" -#~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en moins de 20 lignes de code" +#~ " !" 
+ +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" +#~ "Comme nous voulons utiliser l'API Keras" +#~ " de TensorFlow (TF), nous devons " +#~ "également installer TF :" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" +#~ "Ensuite, dans un fichier appelé " +#~ ":code:`client.py`, importe Flower et " +#~ "TensorFlow :" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" +#~ "Nous utilisons les utilitaires Keras de" +#~ " TF pour charger CIFAR10, un ensemble" +#~ " de données de classification d'images " +#~ "colorées populaire pour l'apprentissage " +#~ "automatique. L'appel à " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` télécharge " +#~ "CIFAR10, le met en cache localement, " +#~ "puis renvoie l'ensemble d'entraînement et " +#~ "de test sous forme de NumPy " +#~ "ndarrays." #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower, for example Flower" -#~ " nightly, you can do so by " -#~ "changing the tag. All available versions" -#~ " are on `Docker Hub " -#~ "`__." +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" +#~ "Ensuite, nous avons besoin d'un modèle." +#~ " Pour les besoins de ce tutoriel, " +#~ "nous utilisons MobilNetV2 avec 10 " +#~ "classes de sortie :" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" +#~ "Le serveur Flower interagit avec les " +#~ "clients par le biais d'une interface " +#~ "appelée :code:`Client`. Lorsque le serveur " +#~ "sélectionne un client particulier pour " +#~ "la formation, il envoie des instructions" +#~ " de formation sur le réseau. Le " +#~ "client reçoit ces instructions et " +#~ "appelle l'une des méthodes :code:`Client` " +#~ "pour exécuter ton code (c'est-à-dire " +#~ "pour former le réseau neuronal que " +#~ "nous avons défini plus tôt)." + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise Keras. 
L'interface " +#~ ":code:`NumPyClient` définit trois méthodes qui" +#~ " peuvent être mises en œuvre de " +#~ "la manière suivante :" #~ msgid "" -#~ "Here's another example to start with " -#~ "HTTPS. Use the ``--certificates`` command " -#~ "line argument to pass paths to (CA" -#~ " certificate, server certificate, and " -#~ "server private key)." +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`CifarClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. La chaîne " +#~ ":code:`\"[: :]:8080\"` indique au client " +#~ "à quel serveur se connecter. Dans " +#~ "notre cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"[: :]:8080\"`. Si nous exécutons " +#~ "une charge de travail véritablement " +#~ "fédérée avec le serveur et les " +#~ "clients fonctionnant sur des machines " +#~ "différentes, tout ce qui doit changer" +#~ " est l'adresse :code:`server_address` vers " +#~ "laquelle nous dirigeons le client." -#~ msgid "Run Flower server (Driver API)." -#~ msgstr "flower-driver-api" +#~ msgid "Each client will have its own dataset." +#~ msgstr "Chaque client aura son propre ensemble de données." -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" +#~ "Tu devrais maintenant voir comment la" +#~ " formation se déroule dans le tout" +#~ " premier terminal (celui qui a " +#~ "démarré le serveur) :" -#~ msgid "Run Flower server (Fleet API)." -#~ msgstr "flower-fleet-api" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." +#~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " `code source complet " +#~ "`_ pour cela se trouve" +#~ " dans :code:`examples/quickstart-tensorflow/client.py`." 
-#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" #~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" #~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" #~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" #~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" #~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" #~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po index d0ba4f6ed5a1..4c738e16b434 100644 --- a/doc/locales/ko/LC_MESSAGES/framework-docs.po +++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po @@ -7,78 +7,226 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" -"PO-Revision-Date: 2024-06-25 10:43+0000\n" -"Last-Translator: \"Young D. Kwon\" \n" -"Language-Team: Korean \n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" +"PO-Revision-Date: 2024-08-23 13:09+0000\n" +"Last-Translator: Seulki Yun \n" "Language: ko\n" +"Language-Team: Korean \n" +"Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Weblate 5.6-rc\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Flower 아키텍처" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 +msgid "" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +#, fuzzy +msgid "Flower public API" +msgstr "Flower ClientApp." + +#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +msgid "" +"What about components that are nested deeper in the hierarchy? 
Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "엣지 클라이언트 엔진" +#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 msgid "" -"`Flower `_ core framework architecture with Edge Client " -"Engine" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." msgstr "" -"`Flower `_의 핵심 프레임워크 아키텍처와 엣지 클라이언트 엔" -"진" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "가상 클라이언트 엔진" +#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:100 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_의 핵심 프레임워크 아키텍처와 가상 클라이언트 엔" -"진" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "동일 작업에서 가상 클라이언트 엔진과 엣지 클라이언트 엔진" +#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. 
Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with both Virtual " -"Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -"`Flower `_의 핵심 프레임워크 아키텍처와 가상 및 엣지 클라" -"이언트 엔진" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "Docker Flower 이미지를 Locally 구축하는 방법" #: ../../source/contributor-how-to-build-docker-images.rst:4 msgid "" -"Flower provides pre-made docker images on `Docker Hub `_ that include all necessary dependencies for running the " -"SuperLink, SuperNode or ServerApp. You can also build your own custom docker " -"images from scratch with a different version of Python or Linux distribution " -"(Ubuntu/Alpine) if that is what you need. In this guide, we will explain " -"what images exist and how to build them locally." +"Flower provides pre-made docker images on `Docker Hub " +"`_ that include all necessary dependencies" +" for running the SuperLink, SuperNode or ServerApp. You can also build " +"your own custom docker images from scratch with a different version of " +"Python or Linux distribution (Ubuntu/Alpine) if that is what you need. In" +" this guide, we will explain what images exist and how to build them " +"locally." msgstr "" -"Flower는 'Docker Hub '_에서 미리 만들어진 " -"Docker 이미지들을 제공합니다. 해당 이미지들은 SuperLink, ServerNode 또는 " -"ServerApp을 실행하는 데 필요한 모든 dependencies를 포함합니다. 필요한 경우 다" -"른 버전의 Python이나 Linux 배포판(Ubuntu/Alpine)을 사용해 처음부터 사용자 정" -"의 Docker 이미지를 빌드할 수도 있습니다. 이 가이드에서는 존재하는 이미지들과 " -"이들을 로컬에서 빌드하는 방법에 대해 설명하겠습니다." +"Flower는 'Docker Hub '_에서 미리 만들어진 Docker " +"이미지들을 제공합니다. 해당 이미지들은 SuperLink, ServerNode 또는 ServerApp을 실행하는 데 필요한 모든 " +"dependencies를 포함합니다. 필요한 경우 다른 버전의 Python이나 Linux 배포판(Ubuntu/Alpine)을 사용해" +" 처음부터 사용자 정의 Docker 이미지를 빌드할 수도 있습니다. 이 가이드에서는 존재하는 이미지들과 이들을 로컬에서 빌드하는 " +"방법에 대해 설명하겠습니다." #: ../../source/contributor-how-to-build-docker-images.rst:10 msgid "" @@ -87,192 +235,211 @@ msgid "" msgstr "시작하기 전에, 로컬 개발 환경에서 몇 가지 전제 조건을 충족해야 합니다." #: ../../source/contributor-how-to-build-docker-images.rst:12 -msgid "Clone the flower repository." -msgstr "Flower 레포지토리를 복제합니다." +#, fuzzy +msgid "Clone the ``flower`` repository." +msgstr "플라워 레포지토리를 클론합니다." #: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 msgid "Verify the Docker daemon is running." msgstr "Docker 데몬이 실행 중인지 확인하십시오." #: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" -":doc:Run Flower using Docker 의 첫 번째 섹션" -"을 따라 주십시오. 해당 부분을 더 자세히 설명해 줍니다." 
- -#: ../../source/contributor-how-to-build-docker-images.rst:25 msgid "" "The build instructions that assemble the images are located in the " -"respective Dockerfiles. You can find them in the subdirectories of ``src/" -"docker``." +"respective Dockerfiles. You can find them in the subdirectories of " +"``src/docker``." msgstr "" -"이미지들을 조합하는 빌드 명령어들은 해당 Dockerfile에 있습니다. \"src/" -"docker\" 의 하위 디렉토리에서 찾을 수 있습니다." +"이미지들을 조합하는 빌드 명령어들은 해당 Dockerfile에 있습니다. \"src/docker\" 의 하위 디렉토리에서 찾을 수 " +"있습니다." -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:23 msgid "" "Flower Docker images are configured via build arguments. Through build " -"arguments, we can make the creation of images more flexible. For example, in " -"the base image, we can specify the version of Python to install using the " -"``PYTHON_VERSION`` build argument. Some of the build arguments have default " -"values, others must be specified when building the image. All available " -"build arguments for each image are listed in one of the tables below." -msgstr "" -"Flower Docker는 빌드 전달인자를 통해 구성됩니다. 빌드 argument들을 통해, " -"이미지를 보다 유연하게 생성할 수 있습니다. 예를 들어, base 이미지에서 " -"\"PYTHON_VERSION\" 빌드 전달인자를 사용하여 Python 버전을 지정할 수 " -"있습니다. 일부 빌드 전달인자들은 기본값이며, 이미지를 빌드할 때 지정해야 " -"합니다. 각 이미지에 사용할 수 있는 모든 빌드 전달인자는 아래 표 중에 " -"있습니다." +"arguments, we can make the creation of images more flexible. For example," +" in the base image, we can specify the version of Python to install using" +" the ``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." +msgstr "" +"Flower Docker는 빌드 전달인자를 통해 구성됩니다. 빌드 argument들을 통해, 이미지를 보다 유연하게 생성할 수 " +"있습니다. 예를 들어, base 이미지에서 \"PYTHON_VERSION\" 빌드 전달인자를 사용하여 Python 버전을 지정할 수" +" 있습니다. 일부 빌드 전달인자들은 기본값이며, 이미지를 빌드할 때 지정해야 합니다. 각 이미지에 사용할 수 있는 모든 빌드 " +"전달인자는 아래 표 중에 있습니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:35 -msgid "Building the base image" +#: ../../source/contributor-how-to-build-docker-images.rst:30 +#, fuzzy +msgid "Building the Base Image" msgstr "기본 이미지 빌드" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:36 #: ../../source/contributor-how-to-build-docker-images.rst:98 msgid "Build argument" msgstr "빌드 전달인자" -#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:37 #: ../../source/contributor-how-to-build-docker-images.rst:99 msgid "Description" msgstr "설명" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:38 #: ../../source/contributor-how-to-build-docker-images.rst:100 msgid "Required" msgstr "필수" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:39 #: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/docker/persist-superlink-state.rst:18 +#: ../../source/docker/pin-version.rst:11 +#: ../../source/docker/set-environment-variables.rst:8 msgid "Example" msgstr "예시" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:40 msgid "``DISTRO``" msgstr "``DISTRO``" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:41 msgid "The Linux distribution to use as the base image." msgstr "기본 이미지 사용을 위한 Linux 배포판." -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 +#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #: ../../source/contributor-how-to-build-docker-images.rst:104 msgid "No" msgstr "아니오" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:43 msgid "``ubuntu``" msgstr "``ubuntu``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:44 msgid "``DISTRO_VERSION``" msgstr "``DISTRO_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "Version of the Linux distribution." msgstr "Linux 배포판 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:52 -msgid "``22.04``" -msgstr "``22.04``" +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:48 msgid "``PYTHON_VERSION``" msgstr "``PYTHON_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:49 msgid "Version of ``python`` to be installed." msgstr "설치 된 ``python`` 버전." 
-#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "``3.11`` or ``3.11.1``" msgstr "``3.11`` 또는 ``3.11.1``" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:52 msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "Version of ``pip`` to be installed." msgstr "설치 된 ``pip`` 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:62 #: ../../source/contributor-how-to-build-docker-images.rst:108 msgid "Yes" msgstr "예" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -msgid "``23.0.1``" -msgstr "``23.0.1``" +#: ../../source/contributor-how-to-build-docker-images.rst:55 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:56 msgid "``SETUPTOOLS_VERSION``" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:57 msgid "Version of ``setuptools`` to be installed." msgstr "설치 된 ``setuptools`` 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:64 -msgid "``69.0.2``" -msgstr "``69.0.2``" +#: ../../source/contributor-how-to-build-docker-images.rst:59 +#, fuzzy +msgid ":substitution-code:`|setuptools_version|`" +msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:60 msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:61 msgid "Version of Flower to be installed." msgstr "설치 된 Flower 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:68 -msgid "``1.8.0``" -msgstr "``1.8.0``" +#: ../../source/contributor-how-to-build-docker-images.rst:63 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:64 msgid "``FLWR_PACKAGE``" msgstr "``FLWR_PACKAGE``" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:65 msgid "The Flower package to be installed." msgstr "설치 할 Flower 패키지." -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "``flwr`` or ``flwr-nightly``" msgstr "``flwr`` 또는 ``flwr-nightly``" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." 
+msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +#, fuzzy +msgid "`Direct Reference Examples`_" +msgstr "예시 요청" + +#: ../../source/contributor-how-to-build-docker-images.rst:73 +#, fuzzy msgid "" -"The following example creates a base Ubuntu/Alpine image with Python 3.11.0, " -"pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"The following example creates a base Ubuntu/Alpine image with Python " +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" -"다음 예시에서는 Python 3.11.0, pip 23.0.1, setuptools 및 Flower 1.8.0으로 기" -"본 Ubuntu/Alpine 이미지를 만듭니다:" +"다음 예시에서는 Python 3.11.0, pip 23.0.1, setuptools 및 Flower 1.8.0으로 기본 " +"Ubuntu/Alpine 이미지를 만듭니다:" #: ../../source/contributor-how-to-build-docker-images.rst:88 +#, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that the " -"build arguments as well as the name and tag can be adapted to your needs. " -"These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" -"이미지의 이름은 ``flwr_base``이고 태그는 ``0.1.0``입니다. 필요에 따라 빌드 " -"전달인자들 뿐만 아니라 이름과 태그도 정할 수 있습니다. 이 값들은 예시일 " -"뿐입니다." +"이미지의 이름은 ``flwr_base``이고 태그는 ``0.1.0``입니다. 필요에 따라 빌드 전달인자들 뿐만 아니라 이름과 태그도" +" 정할 수 있습니다. 이 값들은 예시일 뿐입니다." #: ../../source/contributor-how-to-build-docker-images.rst:92 -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "SuperLink/SuperNode 또는 ServerApp 이미지 빌드" +#, fuzzy +msgid "Building a Flower Binary Image" +msgstr "기본 이미지 빌드" #: ../../source/contributor-how-to-build-docker-images.rst:102 msgid "``BASE_REPOSITORY``" @@ -295,59 +462,63 @@ msgid "The Tag of the Flower base image." msgstr "Flower 기본 이미지의 태그." #: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" -msgstr "``1.8.0-py3.10-ubuntu22.04``" +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" +msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:111 msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image with " -"the official Flower base image:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -"다음 예시에서는 공식 Flower 기본 이미지로 SuperLink/SuperNode 또는 ServerApp" -"이미지를 만듭니다:" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:121 +#, fuzzy msgid "" -"If you want to use your own base image instead of the official Flower base " -"image, all you need to do is set the ``BASE_REPOSITORY`` build argument." +"If you want to use your own base image instead of the official Flower " +"base image, all you need to do is set the ``BASE_REPOSITORY`` build " +"argument to ``flwr_base`` (as we've specified above)." msgstr "" -"공식 Flower 기본 이미지 대신 자체 기본 이미지를 사용 하길 원한다면, " -"``BASE_REPOSITORY`` 빌드 전달인자들을 설정해야 합니다." +"공식 Flower 기본 이미지 대신 자체 기본 이미지를 사용 하길 원한다면, ``BASE_REPOSITORY`` 빌드 전달인자들을 " +"설정해야 합니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:133 +#: ../../source/contributor-how-to-build-docker-images.rst:132 msgid "After creating the image, we can test whether the image is working:" msgstr "이미지 생성 후에, 이미지가 작동하는지 테스트할 수 있습니다:" +#: ../../source/contributor-how-to-build-docker-images.rst:139 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "예시 요청" + #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" msgstr "번역 기여" #: ../../source/contributor-how-to-contribute-translations.rst:4 msgid "" -"Since `Flower 1.5 `_ we have introduced translations to our doc pages, " -"but, as you might have noticed, the translations are often imperfect. If you " -"speak languages other than English, you might be able to help us in our " -"effort to make Federated Learning accessible to as many people as possible " -"by contributing to those translations! This might also be a great " -"opportunity for those wanting to become open source contributors with little " -"prerequisites." -msgstr "" -"`Flower 1.5 `_ 부터 문서 페이지에 번역을 도입했지만, 아시다시피 " -"번역이 불안전한 경우가 많습니다. 만일 영어 이외의 언어를 사용한다면, 많은 " -"사람들이 연합 학습에 접근할 수 있도록 번역 작업에 기여함으로써 저희의 노력에 " -"도움을 주실 수 있습니다! 이는 전제 조건이 거의 없는 오픈 소스 기여자가 " -"되고자 하는 사람들에게 좋은 기회가 될 수도 있습니다." +"Since `Flower 1.5 `_ we have introduced translations to " +"our doc pages, but, as you might have noticed, the translations are often" +" imperfect. If you speak languages other than English, you might be able " +"to help us in our effort to make Federated Learning accessible to as many" +" people as possible by contributing to those translations! This might " +"also be a great opportunity for those wanting to become open source " +"contributors with little prerequisites." +msgstr "" +"`Flower 1.5 `_ 부터 문서 페이지에 번역을 도입했지만, 아시다시피 번역이 불안전한 " +"경우가 많습니다. 만일 영어 이외의 언어를 사용한다면, 많은 사람들이 연합 학습에 접근할 수 있도록 번역 작업에 기여함으로써 저희의" +" 노력에 도움을 주실 수 있습니다! 이는 전제 조건이 거의 없는 오픈 소스 기여자가 되고자 하는 사람들에게 좋은 기회가 될 수도 " +"있습니다." #: ../../source/contributor-how-to-contribute-translations.rst:13 msgid "" -"Our translation project is publicly available over on `Weblate `_, this where most of " -"the work will happen." +"Our translation project is publicly available over on `Weblate " +"`_, this " +"where most of the work will happen." msgstr "" -"번역 프로젝트는 `Weblate `_에서 공개적으로 진행되며, 대부분의 작업이 이곳에서 이루어집니다." +"번역 프로젝트는 `Weblate `_에서 공개적으로 진행되며, 대부분의 작업이 이곳에서 이루어집니다." #: ../../source/contributor-how-to-contribute-translations.rst:18 msgid "Contribute to existing languages" @@ -355,43 +526,43 @@ msgstr "기존 언어에 기여하기" #: ../../source/contributor-how-to-contribute-translations.rst:23 msgid "" -"The first thing you will need to do in order to contribute is to create a " -"free Weblate account on this `page `_. More information about profile settings can be found `here " +"The first thing you will need to do in order to contribute is to create a" +" free Weblate account on this `page " +"`_. More information about" +" profile settings can be found `here " "`_." msgstr "" -"기여를 하기 위해 가장 먼저 해야 할 일은 해당 `page `_에서 무료 Weblate 계정을 만드는 것입니다. 프로필 설" -"정에 대한 자세한 정보는 `here `_를 참조하세요." +"기여를 하기 위해 가장 먼저 해야 할 일은 해당 `page " +"`_에서 무료 Weblate 계정을 만드는 " +"것입니다. 프로필 설정에 대한 자세한 정보는 `here " +"`_를 참조하세요." #: ../../source/contributor-how-to-contribute-translations.rst:29 msgid "" -"Once you are signed in to Weblate, you can navigate to the `Flower Framework " -"project `_. " -"Here, you should see the different existing languages that can be found on " -"the website." 
+"Once you are signed in to Weblate, you can navigate to the `Flower " +"Framework project `_. Here, you should see the different existing languages" +" that can be found on the website." msgstr "" -"Weblate에 로그인한 후, `Flower Framework project `_로 이동할 수 있습니다. 여기에서 웹사이트에 " -"있는 다양한 기존 언어들을 확인할 수 있습니다." +"Weblate에 로그인한 후, `Flower Framework project " +"`_로 이동할 수 " +"있습니다. 여기에서 웹사이트에 있는 다양한 기존 언어들을 확인할 수 있습니다." #: ../../source/contributor-how-to-contribute-translations.rst:34 msgid "" -"Once you have selected the language you want to contribute to, you should " -"see a similar interface to this:" +"Once you have selected the language you want to contribute to, you should" +" see a similar interface to this:" msgstr "기여하고자 하는 언어를 선택하면, 다음과 같은 인터페이스가 나타납니다:" #: ../../source/contributor-how-to-contribute-translations.rst:39 msgid "" "The most straight forward option here is to click on the ``Translate`` " -"button on the top right (in the ``Translation status`` section). This will " -"automatically bring you to the translation interface for untranslated " -"strings." +"button on the top right (in the ``Translation status`` section). This " +"will automatically bring you to the translation interface for " +"untranslated strings." msgstr "" -"여기서 가장 간단한 옵션은 오른쪽 상단(``Translation status`` 부분)에 있는 " -"``Translate`` 버튼을 클릭하는 것 입니다. 번역되지 않은 문장에 대한 번역 인터" -"페이스로 자동으로 이동합니다." +"여기서 가장 간단한 옵션은 오른쪽 상단(``Translation status`` 부분)에 있는 ``Translate`` 버튼을 " +"클릭하는 것 입니다. 번역되지 않은 문장에 대한 번역 인터페이스로 자동으로 이동합니다." #: ../../source/contributor-how-to-contribute-translations.rst:43 msgid "This is what the interface looks like:" @@ -399,47 +570,44 @@ msgstr "인터페이스는 다음과 같습니다:" #: ../../source/contributor-how-to-contribute-translations.rst:47 msgid "" -"You input your translation in the text box at the top and then, once you are " -"happy with it, you either press ``Save and continue`` (to save the " -"translation and go to the next untranslated string), ``Save and stay`` (to " -"save the translation and stay on the same page), ``Suggest`` (to add your " -"translation to suggestions for other users to view), or ``Skip`` (to go to " -"the next untranslated string without saving anything)." +"You input your translation in the text box at the top and then, once you " +"are happy with it, you either press ``Save and continue`` (to save the " +"translation and go to the next untranslated string), ``Save and stay`` " +"(to save the translation and stay on the same page), ``Suggest`` (to add " +"your translation to suggestions for other users to view), or ``Skip`` (to" +" go to the next untranslated string without saving anything)." msgstr "" -"번역문을 상단의 텍스트 상자에 입력한 후, 번역이 만족스러우면 ``Save and " -"continue``(번역을 저장하고 다음 미번역 문장으로 이동), ``Save and stay``(번역" -"을 저장하고 해당 페이지에 머무르기), ``Suggest`` (다른 사용자가 볼 수 있도록 " -"번역을 제안 항목에 추가), ``Skip``(아무것도 저장하지 않고 다음 미번역 문장으" -"로 이동) 중 하나를 선택하면 됩니다." +"번역문을 상단의 텍스트 상자에 입력한 후, 번역이 만족스러우면 ``Save and continue``(번역을 저장하고 다음 미번역 " +"문장으로 이동), ``Save and stay``(번역을 저장하고 해당 페이지에 머무르기), ``Suggest`` (다른 사용자가 " +"볼 수 있도록 번역을 제안 항목에 추가), ``Skip``(아무것도 저장하지 않고 다음 미번역 문장으로 이동) 중 하나를 선택하면 " +"됩니다." #: ../../source/contributor-how-to-contribute-translations.rst:54 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " "``Automatic suggestions`` (from machine translation engines), the " -"translations in ``Other languages``, and the ``History`` of translations for " -"this string." 
+"translations in ``Other languages``, and the ``History`` of translations " +"for this string." msgstr "" -"번역에 도움을 주기위해 하단에서 `주변 문자열``, ``의견``(다른 기여자의), ``자" -"동 제안``(기계 번역의), ``다른 언어``의 번역 및 해당 문장의 번역``히스토리``" -"를 볼 수 있습니다." +"번역에 도움을 주기위해 하단에서 `주변 문자열``, ``의견``(다른 기여자의), ``자동 제안``(기계 번역의), ``다른 " +"언어``의 번역 및 해당 문장의 번역``히스토리``를 볼 수 있습니다." #: ../../source/contributor-how-to-contribute-translations.rst:59 msgid "" -"On the right, under the ``String information`` section, you can also click " -"the link under ``Source string location`` in order to view the source of the " -"doc file containing the string." -msgstr "" -"오른쪽의 ``문자열 정보``에서 ``원본 문자열 위치``를 클릭하여 해당 문장이 포함" -"된 문서의 파일 소스를 볼 수도 있습니다." +"On the right, under the ``String information`` section, you can also " +"click the link under ``Source string location`` in order to view the " +"source of the doc file containing the string." +msgstr "오른쪽의 ``문자열 정보``에서 ``원본 문자열 위치``를 클릭하여 해당 문장이 포함된 문서의 파일 소스를 볼 수도 있습니다." #: ../../source/contributor-how-to-contribute-translations.rst:63 msgid "" -"For more information about translating using Weblate, you can check out this " -"`in-depth guide `_." +"For more information about translating using Weblate, you can check out " +"this `in-depth guide " +"`_." msgstr "" -"Weblate를 통한 번역에 대한 자세한 정보는 `in-depth guide `_를 확인하세요." +"Weblate를 통한 번역에 대한 자세한 정보는 `in-depth guide " +"`_를 확인하세요." #: ../../source/contributor-how-to-contribute-translations.rst:67 msgid "Add new languages" @@ -447,13 +615,12 @@ msgstr "새 언어 추가" #: ../../source/contributor-how-to-contribute-translations.rst:69 msgid "" -"If you want to add a new language, you will first have to contact us, either " -"on `Slack `_, or by opening an issue on our " -"`GitHub repo `_." +"If you want to add a new language, you will first have to contact us, " +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" -"새 언어를 추가하려면, `Slack `에 문의하거나 " -"`GitHub repo `_에서 issue에 들어가 문의 해야 " -"합니다." +"새 언어를 추가하려면, `Slack `에 문의하거나 `GitHub repo " +"`_에서 issue에 들어가 문의 해야 합니다." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 msgid "Develop in VSCode Dev Containers" @@ -461,52 +628,49 @@ msgstr "VSCode Dev Container에서 개발" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 msgid "" -"When working on the Flower framework we want to ensure that all contributors " -"use the same developer environment to format code or run tests. For this " -"purpose we are using the VSCode Remote Containers extension. What is it? " -"Read the following quote:" +"When working on the Flower framework we want to ensure that all " +"contributors use the same developer environment to format code or run " +"tests. For this purpose we are using the VSCode Remote Containers " +"extension. What is it? Read the following quote:" msgstr "" -"Flower 프레임워크 작업시, 모든 기여자들이 코드 포맷팅이나 테스트 실행을 위해 " -"동일한 개발 환경을 사용하길 원합니다. 이를 위해 VSCode Remote Containers 확장" -"을 사용하고 있습니다. 그것이 무엇인지 알아보기 위해 다음 인용문을 읽어보세요:" +"Flower 프레임워크 작업시, 모든 기여자들이 코드 포맷팅이나 테스트 실행을 위해 동일한 개발 환경을 사용하길 원합니다. 이를 " +"위해 VSCode Remote Containers 확장을 사용하고 있습니다. 그것이 무엇인지 알아보기 위해 다음 인용문을 " +"읽어보세요:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 msgid "" -"The Visual Studio Code Remote - Containers extension lets you use a Docker " -"container as a fully-featured development environment. It allows you to open " -"any folder inside (or mounted into) a container and take advantage of Visual " -"Studio Code's full feature set. 
A :code:`devcontainer.json` file in your " -"project tells VS Code how to access (or create) a development container with " -"a well-defined tool and runtime stack. This container can be used to run an " -"application or to separate tools, libraries, or runtimes needed for working " -"with a codebase." -msgstr "" -"Visual Studio Code Remote - 컨테이너 확장을 사용하면 Docker 컨테이너를 모든 " -"기능을 갖춘 개발 환경으로 사용할 수 있습니다. 이 확장 기능을 사용하면 컨테이" -"너 내부(또는 컨테이너에 마운트된)의 모든 폴더를 열고 Visual Studio Code의 모" -"든 기능을 활용할 수 있습니다. 프로젝트에 있는 :code:`devcontainer.json` 파일" -"은 잘 정의된 도구와 런타임 스택을 사용하여 개발 컨테이너에 액세스(또는 생성)" -"하는 방법을 VS Code에 알려줍니다. 이 컨테이너는 애플리케이션을 실행하거나 코" -"드베이스 작업에 필요한 도구, 라이브러리 또는 런타임을 분리하는 데 사용할 수 " -"있습니다." +"The Visual Studio Code Remote - Containers extension lets you use a " +"Docker container as a fully-featured development environment. It allows " +"you to open any folder inside (or mounted into) a container and take " +"advantage of Visual Studio Code's full feature set. A " +":code:`devcontainer.json` file in your project tells VS Code how to " +"access (or create) a development container with a well-defined tool and " +"runtime stack. This container can be used to run an application or to " +"separate tools, libraries, or runtimes needed for working with a " +"codebase." +msgstr "" +"Visual Studio Code Remote - 컨테이너 확장을 사용하면 Docker 컨테이너를 모든 기능을 갖춘 개발 환경으로 " +"사용할 수 있습니다. 이 확장 기능을 사용하면 컨테이너 내부(또는 컨테이너에 마운트된)의 모든 폴더를 열고 Visual Studio" +" Code의 모든 기능을 활용할 수 있습니다. 프로젝트에 있는 :code:`devcontainer.json` 파일은 잘 정의된 " +"도구와 런타임 스택을 사용하여 개발 컨테이너에 액세스(또는 생성)하는 방법을 VS Code에 알려줍니다. 이 컨테이너는 " +"애플리케이션을 실행하거나 코드베이스 작업에 필요한 도구, 라이브러리 또는 런타임을 분리하는 데 사용할 수 있습니다." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 msgid "" -"Workspace files are mounted from the local file system or copied or cloned " -"into the container. Extensions are installed and run inside the container, " -"where they have full access to the tools, platform, and file system. This " -"means that you can seamlessly switch your entire development environment " -"just by connecting to a different container." +"Workspace files are mounted from the local file system or copied or " +"cloned into the container. Extensions are installed and run inside the " +"container, where they have full access to the tools, platform, and file " +"system. This means that you can seamlessly switch your entire development" +" environment just by connecting to a different container." msgstr "" -"작업 공간 파일은 로컬 파일 시스템에서 마운트되거나 컨테이너에 복사 또는 클론" -"됩니다. 확장 프로그램은 컨테이너 내부에 설치되고 실행되며, 도구, 플랫폼 및 파" -"일 시스템에 완전한 접근 권한을 갖습니다. 이는 다른 컨테이너에 연결하는 것만으" -"로 전체 개발 환경을 원활하게 전환할 수 있음을 의미합니다." +"작업 공간 파일은 로컬 파일 시스템에서 마운트되거나 컨테이너에 복사 또는 클론됩니다. 확장 프로그램은 컨테이너 내부에 설치되고 " +"실행되며, 도구, 플랫폼 및 파일 시스템에 완전한 접근 권한을 갖습니다. 이는 다른 컨테이너에 연결하는 것만으로 전체 개발 환경을 " +"원활하게 전환할 수 있음을 의미합니다." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 msgid "" -"Source: `Official VSCode documentation `_" +"Source: `Official VSCode documentation " +"`_" msgstr "출처 : 공식 VSCode 문서" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 @@ -515,57 +679,52 @@ msgstr "시작하기" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the configuration " -"for the devcontainer can be a bit more involved. The good thing is you don't " -"have to do it. Usually it should be enough to install `Docker `_ on your system and ensure its available on " -"your command line. 
Additionally, install the `VSCode Containers Extension " -"`_." -msgstr "" -"`Dockerfile`을 설정하고 구성하는 것과 개발 컨테이너 구성은 약간 복잡할 수 있" -"습니다. 다행히도, 이를 직접 할 필요는 없습니다. 일반적으로 시스템에 `Docker " -"`_를 설치하고 커맨드 라인에서 사용" -"할 수 있는지 확인하는 것으로 충분합니다. 추가로 `VSCode Containers Extension " +"Configuring and setting up the :code:`Dockerfile` as well the " +"configuration for the devcontainer can be a bit more involved. The good " +"thing is you don't have to do it. Usually it should be enough to install " +"`Docker `_ on your system and " +"ensure its available on your command line. Additionally, install the " +"`VSCode Containers Extension `_." +msgstr "" +"`Dockerfile`을 설정하고 구성하는 것과 개발 컨테이너 구성은 약간 복잡할 수 있습니다. 다행히도, 이를 직접 할 필요는 " +"없습니다. 일반적으로 시스템에 `Docker `_를 " +"설치하고 커맨드 라인에서 사용할 수 있는지 확인하는 것으로 충분합니다. 추가로 `VSCode Containers Extension " "`_을 설치하세요." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 msgid "" -"Now you should be good to go. When starting VSCode, it will ask you to run " -"in the container environment and - if you confirm - automatically build the " -"container and use it. To manually instruct VSCode to use the devcontainer, " -"you can, after installing the extension, click the green area in the bottom " -"left corner of your VSCode window and select the option *(Re)Open Folder in " -"Container*." +"Now you should be good to go. When starting VSCode, it will ask you to " +"run in the container environment and - if you confirm - automatically " +"build the container and use it. To manually instruct VSCode to use the " +"devcontainer, you can, after installing the extension, click the green " +"area in the bottom left corner of your VSCode window and select the " +"option *(Re)Open Folder in Container*." msgstr "" -"이제 준비가 완료되었습니다. VSCode를 시작하면 컨테이너 환경에서 실행할지를 묻" -"고, 확인하면 자동으로 컨테이너를 빌드하고 사용할 것입니다. VSCode에 수동으로 " -"개발 컨테이너를 사용하도록 지시하려면, 확장을 설치한 후, VSCode 창의 왼쪽 하" -"단에 있는 초록색 부을 클릭하고 *(Re)Open Folder in Container* 옵션을 선택하세" -"요." +"이제 준비가 완료되었습니다. VSCode를 시작하면 컨테이너 환경에서 실행할지를 묻고, 확인하면 자동으로 컨테이너를 빌드하고 사용할" +" 것입니다. VSCode에 수동으로 개발 컨테이너를 사용하도록 지시하려면, 확장을 설치한 후, VSCode 창의 왼쪽 하단에 있는 " +"초록색 부을 클릭하고 *(Re)Open Folder in Container* 옵션을 선택하세요." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 msgid "" -"In some cases your setup might be more involved. For those cases consult the " -"following sources:" -msgstr "" -"경우에 따라 설정이 더 복잡할 수도 있습니다. 이러한 경우에는 다음 소스를 참조" -"하세요:" +"In some cases your setup might be more involved. For those cases consult " +"the following sources:" +msgstr "경우에 따라 설정이 더 복잡할 수도 있습니다. 이러한 경우에는 다음 소스를 참조하세요:" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 msgid "" -"`Developing inside a Container `_" +"`Developing inside a Container " +"`_" msgstr "" -"`컨테이너 내부 개발`_" +"`컨테이너 내부 개발`_" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 msgid "" -"`Remote development in Containers `_" -msgstr "" -"`컨테이너 원격 개발`_" +"`Remote development in Containers " +"`_" +msgstr "`컨테이너 원격 개발`_" #: ../../source/contributor-how-to-install-development-versions.rst:2 msgid "Install development versions" @@ -581,20 +740,19 @@ msgstr "Poetry 사용하기(권장)" #: ../../source/contributor-how-to-install-development-versions.rst:10 msgid "" -"Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in " -"``pyproject.toml`` and then reinstall (don't forget to delete ``poetry." -"lock`` (``rm poetry.lock``) before running ``poetry install``)." 
+"Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency " +"in ``pyproject.toml`` and then reinstall (don't forget to delete " +"``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." msgstr "" -"PyPI에서 ``flwr`` 사전 릴리스 설치하기: ``pyproject.toml``에서 ``flwr``의 " -"의존성을 업데이트한 다음, 재설치하세요(``poetry 설치``이전에 ``poetry.lock`` " -"(``rm poetry.lock``)를 제거하는 것을 잊지 마세요)." +"PyPI에서 ``flwr`` 사전 릴리스 설치하기: ``pyproject.toml``에서 ``flwr``의 의존성을 업데이트한 " +"다음, 재설치하세요(``poetry 설치``이전에 ``poetry.lock`` (``rm poetry.lock``)를 제거하는 것을" +" 잊지 마세요)." #: ../../source/contributor-how-to-install-development-versions.rst:12 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" -msgstr "" -"``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (extras 제외)" +msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (extras 제외)" #: ../../source/contributor-how-to-install-development-versions.rst:13 msgid "" @@ -606,11 +764,9 @@ msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" -"Install ``flwr`` from a local copy of the Flower source code via ``pyproject." -"toml``:" -msgstr "" -"``pyproject.toml``을 통해 Flower 소스 코드의 로컬 복사본에서 ``flwr``을 설치" -"하세요:" +"Install ``flwr`` from a local copy of the Flower source code via " +"``pyproject.toml``:" +msgstr "``pyproject.toml``을 통해 Flower 소스 코드의 로컬 복사본에서 ``flwr``을 설치하세요:" #: ../../source/contributor-how-to-install-development-versions.rst:17 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" @@ -618,11 +774,11 @@ msgstr "``flwr = { path = \"../../\", develop = true }`` (extras 제외)" #: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" -"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] }`` " -"(with extras)" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " +"}`` (with extras)" msgstr "" -"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] }`` " -"(extras 포함)" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " +"}`` (extras 포함)" #: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" @@ -630,11 +786,11 @@ msgstr "``pyproject.toml``을 통해 로컬 wheel file에서 ``flwr``을 설치 #: ../../source/contributor-how-to-install-development-versions.rst:22 msgid "" -"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without " -"extras)" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" +" extras)" msgstr "" -"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (extras 제" -"외)" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (extras " +"제외)" #: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "" @@ -650,8 +806,8 @@ msgid "" "Dependency Specification `_" msgstr "" -"자세한 내용은 Poetry 문서를 참고하세요: `Poetry Dependency Specification " -"`_" +"자세한 내용은 Poetry 문서를 참고하세요: `Poetry Dependency Specification `_" #: ../../source/contributor-how-to-install-development-versions.rst:28 msgid "Using pip (recommended on Colab)" @@ -674,8 +830,8 @@ msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." msgstr "" -"Python 패키지는 git 저장소에서 설치할 수 있습니다. 다음 명령어 중 하나를 사용" -"하여 GitHub에서 직접 Flower를 설치하세요." +"Python 패키지는 git 저장소에서 설치할 수 있습니다. 다음 명령어 중 하나를 사용하여 GitHub에서 직접 Flower를 " +"설치하세요." 
#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "Install ``flwr`` from the default GitHub branch (``main``):" @@ -683,17 +839,17 @@ msgstr "기본 GitHub branch (``main``)에서 ``flwr`` 를 설치하기:" #: ../../source/contributor-how-to-install-development-versions.rst:39 msgid "" -"``pip install flwr@git+https://github.com/adap/flower.git`` (without extras)" -msgstr "" -"``pip install flwr@git+https://github.com/adap/flower.git`` (extras 제외)" +"``pip install flwr@git+https://github.com/adap/flower.git`` (without " +"extras)" +msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (extras 제외)" #: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(extras 포함)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (extras 포함)" #: ../../source/contributor-how-to-install-development-versions.rst:42 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" @@ -709,11 +865,11 @@ msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-" -"name'`` (with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-" -"name'`` (extras 포함)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (extras 포함)" #: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Open Jupyter Notebooks on Google Colab" @@ -724,32 +880,32 @@ msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "" -"``doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb``notebook" -"을 엽니다:" +"``doc/source/tutorial-series-get-started-with-flower-" +"pytorch.ipynb``notebook을 엽니다:" #: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" -"https://colab.research.google.com/github/adap/flower/blob/main/doc/source/" -"tutorial-series-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -"https://colab.research.google.com/github/adap/flower/blob/main/doc/source/" -"tutorial-series-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-series-get-started-with-flower-pytorch.ipynb" #: ../../source/contributor-how-to-install-development-versions.rst:55 msgid "" -"Open a development version of the same notebook from branch `branch-name` by " -"changing ``main`` to ``branch-name`` (right after ``blob``):" +"Open a development version of the same notebook from branch `branch-name`" +" by changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" -"``main``을 ``branch-name``(``blob`` 바로 뒤)으로 변경하여 동일한 notebook의 " -"개발 버전을 브랜치 `branch-name`에서 엽니다 :" +"``main``을 ``branch-name``(``blob`` 바로 뒤)으로 변경하여 동일한 notebook의 개발 버전을 브랜치 " +"`branch-name`에서 엽니다 :" #: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "" 
-"https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/" -"source/tutorial-series-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/branch-" +"name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -"https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/" -"source/tutorial-series-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/branch-" +"name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" #: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "Install a `whl` on Google Colab:" @@ -757,11 +913,9 @@ msgstr "Google Colab에서 `whl` 설치하기:" #: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" -"In the vertical icon grid on the left hand side, select ``Files`` > ``Upload " -"to session storage``" -msgstr "" -"왼쪽의 수직 아이콘 그리드에서 ``Files`` > ``Upload to session storage``를 선" -"택하세요" +"In the vertical icon grid on the left hand side, select ``Files`` > " +"``Upload to session storage``" +msgstr "왼쪽의 수직 아이콘 그리드에서 ``Files`` > ``Upload to session storage``를 선택하세요" #: ../../source/contributor-how-to-install-development-versions.rst:62 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" @@ -769,13 +923,13 @@ msgstr "whl (예:``flwr-1.8.0-py3-none-any.whl``)을 업로드하세요" #: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" -"Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` " -"to ``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch " -"torchvision matplotlib``" +"Change ``!pip install -q 'flwr[simulation]' torch torchvision " +"matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" +"any.whl[simulation]' torch torchvision matplotlib``" msgstr "" -"``!pip install -q 'flwr[simulation]' torch torchvision matplotlib``를 ``!pip " -"install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision " -"matplotlib``로 바꾸세요" +"``!pip install -q 'flwr[simulation]' torch torchvision matplotlib``를 " +"``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch " +"torchvision matplotlib``로 바꾸세요" #: ../../source/contributor-how-to-release-flower.rst:2 msgid "Release Flower" @@ -785,8 +939,7 @@ msgstr "Flower 릴리즈 하기" msgid "" "This document describes the current release process. It may or may not " "change in the future." -msgstr "" -"이 문서는 현재 릴리즈 과정을 설명합니다. 이는 앞으로 변경될 수도 있습니다." +msgstr "이 문서는 현재 릴리즈 과정을 설명합니다. 이는 앞으로 변경될 수도 있습니다." #: ../../source/contributor-how-to-release-flower.rst:7 msgid "During the release" @@ -794,11 +947,12 @@ msgstr "릴리즈 동안에" #: ../../source/contributor-how-to-release-flower.rst:9 msgid "" -"The version number of a release is stated in ``pyproject.toml``. To release " -"a new version of Flower, the following things need to happen (in that order):" +"The version number of a release is stated in ``pyproject.toml``. To " +"release a new version of Flower, the following things need to happen (in " +"that order):" msgstr "" -"릴리즈의 버전 번호는 ``pyproject.toml``에 명시되어 있습니다. Flower의 새 버전" -"을 릴리즈하려면 다음 작업이 순서대로 수행되어야 합니다:" +"릴리즈의 버전 번호는 ``pyproject.toml``에 명시되어 있습니다. Flower의 새 버전을 릴리즈하려면 다음 작업이 " +"순서대로 수행되어야 합니다:" #: ../../source/contributor-how-to-release-flower.rst:11 msgid "" @@ -806,233 +960,164 @@ msgid "" "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." 
msgstr "" -"모든 새로운 변경 사항을 변경 로그에 추가하기 위해``python3 src/py/flwr_tool/" -"update_changelog.py ``을 실행합니다 (변경 로그가 만족스러워질 " -"때까지 수동으로 변경해도 됩니다)." +"모든 새로운 변경 사항을 변경 로그에 추가하기 위해``python3 " +"src/py/flwr_tool/update_changelog.py ``을 실행합니다 (변경 로그가 " +"만족스러워질 때까지 수동으로 변경해도 됩니다)." #: ../../source/contributor-how-to-release-flower.rst:12 msgid "" -"Once the changelog has been updated with all the changes, run ``./dev/" -"prepare-release-changelog.sh v``, where ```` is " -"the version stated in ``pyproject.toml`` (notice the ``v`` added before it). " -"This will replace the ``Unreleased`` header of the changelog by the version " -"and current date, and it will add a thanking message for the contributors. " -"Open a pull request with those changes." +"Once the changelog has been updated with all the changes, run ``./dev" +"/prepare-release-changelog.sh v``, where ```` " +"is the version stated in ``pyproject.toml`` (notice the ``v`` added " +"before it). This will replace the ``Unreleased`` header of the changelog " +"by the version and current date, and it will add a thanking message for " +"the contributors. Open a pull request with those changes." msgstr "" -"모든 변경 사항으로 변경 로그가 업데이트되면,``./dev/prepare-release-" -"changelog.sh v``을 실행합니다. 여기서 ````은 " -"``pyproject.toml``에 명시된 버전 번호입니다 (앞에 ``v``가 추가된 것을 주의하" -"세요). 이 명령어는 변경 로그의 ``Unreleased``헤더를 해당 버전과 현재 날짜로 " -"교체하고, 기여자들에게 감사 메시지가 추가됩니다. 이러한 변경 사항으로 pull " -"request합니다." +"모든 변경 사항으로 변경 로그가 업데이트되면,``./dev/prepare-release-changelog.sh " +"v``을 실행합니다. 여기서 ````은 ``pyproject.toml``에 명시된 " +"버전 번호입니다 (앞에 ``v``가 추가된 것을 주의하세요). 이 명령어는 변경 로그의 ``Unreleased``헤더를 해당 버전과" +" 현재 날짜로 교체하고, 기여자들에게 감사 메시지가 추가됩니다. 이러한 변경 사항으로 pull request합니다." #: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Once the pull request is merged, tag the release commit with the version " -"number as soon as the PR is merged: ``git tag v`` (notice the " -"``v`` added before the version number), then ``git push --tags``. This will " -"create a draft release on GitHub containing the correct artifacts and the " -"relevant part of the changelog." +"number as soon as the PR is merged: ``git tag v`` (notice " +"the ``v`` added before the version number), then ``git push --tags``. " +"This will create a draft release on GitHub containing the correct " +"artifacts and the relevant part of the changelog." msgstr "" -"pull request가 병합되면, PR이 병합되는 즉시 버전 번호로 릴리즈 커밋에 태그를 " -"지정합니다:``git tag v`` (버전 번호 앞에 ``v``가 추가된 것을 확" -"인), 그 다음 ``git push --tags``. 이렇게 하면 올바른 아티팩트와 변경 로그의 " -"관련 부분이 포함된 초안 릴리즈가 GitHub에 생성됩니다." +"pull request가 병합되면, PR이 병합되는 즉시 버전 번호로 릴리즈 커밋에 태그를 지정합니다:``git tag " +"v`` (버전 번호 앞에 ``v``가 추가된 것을 확인), 그 다음 ``git push --tags``. " +"이렇게 하면 올바른 아티팩트와 변경 로그의 관련 부분이 포함된 초안 릴리즈가 GitHub에 생성됩니다." #: ../../source/contributor-how-to-release-flower.rst:14 -msgid "" -"Check the draft release on GitHub, and if everything is good, publish it." +msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "GitHub에서 릴리즈 초안을 확인하고, 모든 것이 양호하면 게시하세요." -#: ../../source/contributor-how-to-release-flower.rst:15 -msgid "Trigger the CI for building the Docker images." -msgstr "Docker 이미지 빌드를 위해 CI를 트리거합니다." - #: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a ``workflow_dispatch`` " -"event in the GitHub CI. This can be done either through the UI or via the " -"GitHub CLI. The event requires only one input, the Flower version, to be " -"released." 
-msgstr "" -"워크플로우를 트리거하려면 공동 작업자가 GitHub CI에서 ``workflow_dispatch``" -"를 생성해야 합니다. 이 작업은 UI 또는 GitHub CLI 를 통해 수행할 수 있습니다. " -"이벤트는 Flower 버전 한 가지 입력만 필요합니다." - -#: ../../source/contributor-how-to-release-flower.rst:21 -msgid "**Via the UI**" -msgstr "**UI를 통해서**" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page `_." -msgstr "" -"``Build docker images`` 워크플로우 `페이지 `_로 이동합니다." - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower in " -"the ``Version of Flower`` input field." -msgstr "" -"``Run workflow`` 버튼을 누르고 ``Version of Flower``에 Flower의 새버전을 입력" -"합니다." - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "**초록색**의 ``Run workflow``버튼을 클릭합니다." - -#: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "**GitHub CI를 통해서**" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" -"``gh auth login``을 통해 로그인 했는지, 현재 작업 디렉토리가 Flower 리포지토" -"리의 root인지 확인하세요." - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." -msgstr "" -"``gh workflow run docker-images.yml -f flwr-version=``을 통해 워" -"크플로우 를 트리거합니다." - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "릴리즈 후에" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:19 msgid "Create a pull request which contains the following changes:" msgstr "다음 변경 사항이 포함된 pull request를 만듭니다:" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:21 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "``pyproject.toml``의 마이너 버전을 하나씩 늘립니다." -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "Update all files which contain the current version number if necessary." msgstr "필요한 경우 현재 버전 번호가 포함된 모든 파일을 업데이트합니다." -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:23 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "``changelog.md``에 ``Unreleased`` 섹션을 새로 추가합니다." -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:25 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release " -"gets published to PyPI)." -msgstr "" -"pull request를 같은 날(즉, 새로운 nightly 릴리즈가 PyPI에 게시되기 전에) 병합" -"하세요." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." +msgstr "pull request를 같은 날(즉, 새로운 nightly 릴리즈가 PyPI에 게시되기 전에) 병합하세요." 
-#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:28 msgid "Publishing a pre-release" msgstr "사전 릴리즈 게시" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Pre-release naming" msgstr "사전 릴리즈 이름" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" -msgstr "" -"PyPI는 사전 릴리즈(알파, 베타, 릴리스 후보)를 지원합니다. 사전 릴리즈는 반드" -"시 다음 명명 패턴 중 하나를 사용해야 합니다:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" +" MUST use one of the following naming patterns:" +msgstr "PyPI는 사전 릴리즈(알파, 베타, 릴리스 후보)를 지원합니다. 사전 릴리즈는 반드시 다음 명명 패턴 중 하나를 사용해야 합니다:" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "Alpha: ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:36 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "Beta: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:39 msgid "Examples include:" msgstr "예시:" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:42 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:43 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" -msgstr "" -"이는 PEP-440 및 Python Packaging Authority (PyPA)의 권장 사항과 일치합니다:" +msgstr "이는 PEP-440 및 Python Packaging Authority (PyPA)의 권장 사항과 일치합니다:" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "" -"`PyPA Choosing a versioning scheme `_" +"`PyPA Choosing a versioning scheme " +"`_" msgstr "" -"`PyPA 버전 관리 체계 선택하기 `_" +"`PyPA 버전 관리 체계 선택하기 `_" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: ../../source/contributor-how-to-release-flower.rst:52 msgid "" -"Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 " -"spec, for details consult the `Semantic Versioning Specification `_ (specifically item 11 on " -"precedence)." 
+"Note that the approach defined by PyPA is not compatible with SemVer " +"2.0.0 spec, for details consult the `Semantic Versioning Specification " +"`_ (specifically item " +"11 on precedence)." msgstr "" -"PyPA에서 정의한 접근 방식은 SemVer 2.0.0 사양과 호환되지 않으며, 자세한 내용" -"은`Semantic Versioning 관리 사양 `_ (특히 항목 11이 우선순위)을 참조하세요." +"PyPA에서 정의한 접근 방식은 SemVer 2.0.0 사양과 호환되지 않으며, 자세한 내용은`Semantic Versioning " +"관리 사양 `_ (특히 항목 11이 " +"우선순위)을 참조하세요." -#: ../../source/contributor-how-to-release-flower.rst:73 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "Pre-release classification" msgstr "사전 릴리즈 분류" -#: ../../source/contributor-how-to-release-flower.rst:75 -msgid "" -"Should the next pre-release be called alpha, beta, or release candidate?" +#: ../../source/contributor-how-to-release-flower.rst:57 +msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "다음 사전 릴리즈를 알파, 베타 또는 릴리스 후보라고 불러야 하나요?" -#: ../../source/contributor-how-to-release-flower.rst:77 +#: ../../source/contributor-how-to-release-flower.rst:59 msgid "" -"RC: feature complete, no known issues (apart from issues that are classified " -"as \"won't fix\" for the next stable release) - if no issues surface this " -"will become the next stable release" +"RC: feature complete, no known issues (apart from issues that are " +"classified as \"won't fix\" for the next stable release) - if no issues " +"surface this will become the next stable release" msgstr "" -"RC: 기능 완료, 알려진 문제 없음(다음 stable 릴리즈에서 \"수정되지 않음\"으로 " -"분류된 문제 제외) - 문제가 나타나지 않으면 다음 stable 릴리즈가 됩니다" +"RC: 기능 완료, 알려진 문제 없음(다음 stable 릴리즈에서 \"수정되지 않음\"으로 분류된 문제 제외) - 문제가 나타나지 " +"않으면 다음 stable 릴리즈가 됩니다" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "Beta: feature complete, allowed to have known issues" msgstr "베타: 기능 완료, 알려진 문제 발생 가능" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:61 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "알파: 기능 미완성, 알려진 문제가 있을 수 있음" @@ -1044,12 +1129,11 @@ msgstr "가상 환경 설정" msgid "" "It is recommended to run your Python setup within a virtual environment. " "This guide shows three different examples how to create a virtual " -"environment with pyenv virtualenv, poetry, or Anaconda. You can follow the " -"instructions or choose your preferred setup." +"environment with pyenv virtualenv, poetry, or Anaconda. You can follow " +"the instructions or choose your preferred setup." msgstr "" -"가상 환경 내에서 파이썬 설정을 실행하는 것이 좋습니다. 이 가이드에서는 pyenv " -"virtualenv, poetry 또는 Anaconda를 사용하여 가상 환경을 만드는 세 가지 예제" -"를 보여줍니다. 안내를 따르거나 원하는 설정을 선택할 수 있습니다." +"가상 환경 내에서 파이썬 설정을 실행하는 것이 좋습니다. 이 가이드에서는 pyenv virtualenv, poetry 또는 " +"Anaconda를 사용하여 가상 환경을 만드는 세 가지 예제를 보여줍니다. 안내를 따르거나 원하는 설정을 선택할 수 있습니다." #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 msgid "Python Version" @@ -1058,21 +1142,23 @@ msgstr "Python 버전" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 #: ../../source/how-to-install-flower.rst:8 msgid "" -"Flower requires at least `Python 3.8 `_, but " -"`Python 3.10 `_ or above is recommended." +"Flower requires at least `Python 3.9 `_, " +"but `Python 3.10 `_ or above is " +"recommended." msgstr "" -"Flower는 `Python 3.8 `_이상이 필요하지만, " -"`Python 3.10 `_이상을 권장합니다." +"Flower는 `Python 3.9 `_이상이 필요하지만, `Python " +"3.10 `_이상을 권장합니다." 
#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 msgid "" -"Due to a known incompatibility with `ray `_, " -"we currently recommend utilizing at most `Python 3.11 `_ for running Flower simulations." +"Due to a known incompatibility with `ray " +"`_, we currently recommend utilizing at " +"most `Python 3.11 `_ for running Flower " +"simulations." msgstr "" -"`Ray `__와 호환되지 않는 것으로 알려져 있으므" -"로, 현재 Flower 시뮬레이션을 실행할 때는 최대 `Python 3.11 `_을 사용하는 것이 좋습니다." +"`Ray `__와 호환되지 않는 것으로 알려져 있으므로, 현재 Flower" +" 시뮬레이션을 실행할 때는 최대 `Python 3.11 `_을 사용하는 것이" +" 좋습니다." #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 msgid "Virtualenv with Pyenv/Virtualenv" @@ -1080,23 +1166,22 @@ msgstr "Pyenv/Virtualenv를 사용한 가상 환경" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 msgid "" -"One of the recommended virtual environment is `pyenv `_/`virtualenv `_. " -"Please see `Flower examples `_ for details." +"One of the recommended virtual environment is `pyenv " +"`_/`virtualenv `_. Please see `Flower examples " +"`_ for details." msgstr "" -"권장 가상 환경 중 하나는 `pyenv `_/" -"`virtualenv `_입니다. 자세한 내용" -"은 `Flower examples `_를 " -"참조하세요." +"권장 가상 환경 중 하나는 `pyenv `_/`virtualenv " +"`_입니다. 자세한 내용은 `Flower " +"examples `_를 참조하세요." #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "" -"Pyenv가 설정되면 이를 사용하여 'Python 버전 3.10 `_ 이상'을 설치할 수 있습니다:" +"Pyenv가 설정되면 이를 사용하여 'Python 버전 3.10 `_ " +"이상'을 설치할 수 있습니다:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "Create the virtualenv with:" @@ -1112,21 +1197,18 @@ msgstr "Poetry를 사용한 가상 환경" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 msgid "" -"The Flower examples are based on `Poetry `_ " -"to manage dependencies. After installing Poetry you simply create a virtual " -"environment with:" +"The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " +"simply create a virtual environment with:" msgstr "" -"Flower examples은 의존성을 관리하기 위해 `Poetry `_를 기반으로 합니다. Poetry를 설치한 후 가상 환경을 생성하기만 하면 " -"됩니다:" +"Flower examples은 의존성을 관리하기 위해 `Poetry `_를 기반으로 합니다. Poetry를 설치한 후 가상 환경을 생성하기만 하면 됩니다:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 msgid "" -"If you open a new terminal you can activate the previously created virtual " -"environment with the following command:" -msgstr "" -"새 터미널을 열면 다음 명령을 사용하여 이전에 생성한 가상 환경을 활성화할 수 " -"있습니다:" +"If you open a new terminal you can activate the previously created " +"virtual environment with the following command:" +msgstr "새 터미널을 열면 다음 명령을 사용하여 이전에 생성한 가상 환경을 활성화할 수 있습니다:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 msgid "Virtualenv with Anaconda" @@ -1134,14 +1216,15 @@ msgstr "Anaconda를 사용한 가상 환경" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 msgid "" -"If you prefer to use Anaconda for your virtual environment then install and " -"setup the `conda `_ package. After setting it up you can create a virtual " -"environment with:" +"If you prefer to use Anaconda for your virtual environment then install " +"and setup the `conda `_ package. After setting it up you can " +"create a virtual environment with:" msgstr "" -"가상 환경에서 Anaconda를 사용하려면 `conda `_ 패키지를 설치 및 " -"설정하세요. 설정 후 다음을 사용하여 가상 환경을 만들 수 있습니다:" +"가상 환경에서 Anaconda를 사용하려면 `conda " +"`_ 패키지를 설치 및 설정하세요. 
설정 후 다음을 사용하여 가상 환경을 만들 수 " +"있습니다:" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 msgid "and activate the virtual environment with:" @@ -1153,11 +1236,11 @@ msgstr "그다음은?" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 msgid "" -"As soon as you created your virtual environment you clone one of the `Flower " -"examples `_." +"As soon as you created your virtual environment you clone one of the " +"`Flower examples `_." msgstr "" -"가상 환경을 생성하자마자 'Flower examples `_ 중 하나를 클론합니다." +"가상 환경을 생성하자마자 'Flower examples " +"`_ 중 하나를 클론합니다." #: ../../source/contributor-how-to-write-documentation.rst:2 msgid "Write documentation" @@ -1169,24 +1252,23 @@ msgstr "프로젝트 레이아웃" #: ../../source/contributor-how-to-write-documentation.rst:8 msgid "" -"The Flower documentation lives in the ``doc`` directory. The Sphinx-based " -"documentation system supports both reStructuredText (``.rst`` files) and " -"Markdown (``.md`` files)." +"The Flower documentation lives in the ``doc`` directory. The Sphinx-based" +" documentation system supports both reStructuredText (``.rst`` files) and" +" Markdown (``.md`` files)." msgstr "" -"Flower 문서는 ``doc`` 디렉토리에 있습니다. Sphinx 기반 문서 시스템은 " -"reStructuredText 텍스트(``.rst`` 파일)와 Markdown(``.md`` 파일)을 모두 지원합" -"니다." +"Flower 문서는 ``doc`` 디렉토리에 있습니다. Sphinx 기반 문서 시스템은 reStructuredText " +"텍스트(``.rst`` 파일)와 Markdown(``.md`` 파일)을 모두 지원합니다." #: ../../source/contributor-how-to-write-documentation.rst:10 #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 msgid "" -"Note that, in order to build the documentation locally (with ``poetry run " -"make html``, like described below), `Pandoc `_ needs to be installed on the system." +"Note that, in order to build the documentation locally (with ``poetry run" +" make html``, like described below), `Pandoc " +"`_ needs to be installed on the " +"system." msgstr "" -"로컬에서 문서를 작성하려면(아래 설명과 같이 ``poetry run make html``로) " -"`Pandoc `_이 시스템에 설치되어 있어야 합" -"니다." +"로컬에서 문서를 작성하려면(아래 설명과 같이 ``poetry run make html``로) `Pandoc " +"`_이 시스템에 설치되어 있어야 합니다." #: ../../source/contributor-how-to-write-documentation.rst:14 msgid "Edit an existing page" @@ -1199,8 +1281,7 @@ msgstr "doc/source/``에서 기존 ``.rst``(또는 ``.md``) 파일을 편집합 #: ../../source/contributor-how-to-write-documentation.rst:17 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" -msgstr "" -"문서를 컴파일합니다: ``cd doc``, ``poetry run make html`` 순으로 컴파일합니다" +msgstr "문서를 컴파일합니다: ``cd doc``, ``poetry run make html`` 순으로 컴파일합니다" #: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:28 @@ -1229,14 +1310,13 @@ msgstr "훌륭한 첫 번째 기여" #: ../../source/contributor-ref-good-first-contributions.rst:4 msgid "" -"We welcome contributions to Flower! However, it is not always easy to know " -"where to start. We therefore put together a few recommendations on where to " -"start to increase your chances of getting your PR accepted into the Flower " -"codebase." +"We welcome contributions to Flower! However, it is not always easy to " +"know where to start. We therefore put together a few recommendations on " +"where to start to increase your chances of getting your PR accepted into " +"the Flower codebase." msgstr "" -"Flower에 대한 기여를 환영합니다! 하지만 어디서부터 시작해야 할지 알기란 쉽지 " -"않습니다. 그래서 저희는 여러분의 PR이 Flower 코드베이스에 채택될 가능성을 높" -"이기 위해 어디서부터 시작해야 하는지 몇 가지 권장 사항을 정리해 보았습니다." +"Flower에 대한 기여를 환영합니다! 하지만 어디서부터 시작해야 할지 알기란 쉽지 않습니다. 
그래서 저희는 여러분의 PR이 " +"Flower 코드베이스에 채택될 가능성을 높이기 위해 어디서부터 시작해야 하는지 몇 가지 권장 사항을 정리해 보았습니다." #: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "Where to start" @@ -1244,13 +1324,12 @@ msgstr "시작 위치" #: ../../source/contributor-ref-good-first-contributions.rst:13 msgid "" -"Until the Flower core library matures it will be easier to get PR's accepted " -"if they only touch non-core areas of the codebase. Good candidates to get " -"started are:" +"Until the Flower core library matures it will be easier to get PR's " +"accepted if they only touch non-core areas of the codebase. Good " +"candidates to get started are:" msgstr "" -"Flower 코어 라이브러리가 완성될 때까지는 코드베이스의 비핵심 영역만 건드리는 " -"것이 PR을 승인받기가 더 쉬울 것입니다. 시작하기에 좋은 후보자는 다음과 같습니" -"다:" +"Flower 코어 라이브러리가 완성될 때까지는 코드베이스의 비핵심 영역만 건드리는 것이 PR을 승인받기가 더 쉬울 것입니다. " +"시작하기에 좋은 후보자는 다음과 같습니다:" #: ../../source/contributor-ref-good-first-contributions.rst:17 msgid "Documentation: What's missing? What could be expressed more clearly?" @@ -1270,32 +1349,32 @@ msgstr "Flower Baselines 요청" #: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" -"If you are not familiar with Flower Baselines, you should probably check-out " -"our `contributing guide for baselines `_." +"If you are not familiar with Flower Baselines, you should probably check-" +"out our `contributing guide for baselines " +"`_." msgstr "" -"Flower Baseline에 익숙하지 않다면 ' Baseline 기여 가이드 `_를 확인해보세요." +"Flower Baseline에 익숙하지 않다면 ' Baseline 기여 가이드 " +"`_를 " +"확인해보세요." #: ../../source/contributor-ref-good-first-contributions.rst:27 msgid "" -"You should then check out the open `issues `_ for baseline " -"requests. If you find a baseline that you'd like to work on and that has no " -"assignees, feel free to assign it to yourself and start working on it!" +"You should then check out the open `issues " +"`_" +" for baseline requests. If you find a baseline that you'd like to work on" +" and that has no assignees, feel free to assign it to yourself and start " +"working on it!" msgstr "" -"그런 다음 오픈 된 `issues `_에서 baseline " -"요청을 확인해야 합니다. 작업하고 싶은 기준선을 찾았지만 담당자가 없는 경우, " -"자유롭게 자신에게 할당하고 작업을 시작하세요!" +"그런 다음 오픈 된 `issues " +"`_에서" +" baseline 요청을 확인해야 합니다. 작업하고 싶은 기준선을 찾았지만 담당자가 없는 경우, 자유롭게 자신에게 할당하고 작업을 " +"시작하세요!" #: ../../source/contributor-ref-good-first-contributions.rst:31 msgid "" -"Otherwise, if you don't find a baseline you'd like to work on, be sure to " -"open a new issue with the baseline request template!" -msgstr "" -"그렇지 않으면 작업하고 싶은 baseline을 찾지 못하면 baseline 요청 템플릿으로 " -"새 이슈를 열어야 합니다!" +"Otherwise, if you don't find a baseline you'd like to work on, be sure to" +" open a new issue with the baseline request template!" +msgstr "그렇지 않으면 작업하고 싶은 baseline을 찾지 못하면 baseline 요청 템플릿으로 새 이슈를 열어야 합니다!" #: ../../source/contributor-ref-good-first-contributions.rst:34 msgid "Request for examples" @@ -1304,12 +1383,11 @@ msgstr "예시 요청" #: ../../source/contributor-ref-good-first-contributions.rst:36 msgid "" "We wish we had more time to write usage examples because we believe they " -"help users to get started with building what they want to build. Here are a " -"few ideas where we'd be happy to accept a PR:" +"help users to get started with building what they want to build. Here are" +" a few ideas where we'd be happy to accept a PR:" msgstr "" -"사용 예시는 사용자가 원하는 것을 구축하는 데 도움이 된다고 생각하기 때문에 " -"더 많은 시간을 할애하여 작성할 수 있었으면 합니다. 다음은 저희가 기꺼이 PR을 " -"수락할 수 있는 몇 가지 아이디어입니다:" +"사용 예시는 사용자가 원하는 것을 구축하는 데 도움이 된다고 생각하기 때문에 더 많은 시간을 할애하여 작성할 수 있었으면 합니다. 
" +"다음은 저희가 기꺼이 PR을 수락할 수 있는 몇 가지 아이디어입니다:" #: ../../source/contributor-ref-good-first-contributions.rst:40 msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" @@ -1329,15 +1407,14 @@ msgstr "Secure Aggregation 프로토콜" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 msgid "" -"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg protocol " -"has not been implemented yet, so its diagram and abstraction may not be " -"accurate in practice. The SecAgg protocol can be considered as a special " -"case of the SecAgg+ protocol." +"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " +"protocol has not been implemented yet, so its diagram and abstraction may" +" not be accurate in practice. The SecAgg protocol can be considered as a " +"special case of the SecAgg+ protocol." msgstr "" -"SecAgg, SecAgg+, LightSecAgg 프로토콜을 포함합니다. LightSecAgg 프로토콜은 아" -"직 구현되지 않았기 때문에 다이어그램과 추상화가 실제로는 정확하지 않을 수 있" -"습니다. SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로 간주할 수 있습니" -"다." +"SecAgg, SecAgg+, LightSecAgg 프로토콜을 포함합니다. LightSecAgg 프로토콜은 아직 구현되지 않았기 " +"때문에 다이어그램과 추상화가 실제로는 정확하지 않을 수 있습니다. SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로 " +"간주할 수 있습니다." #: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 msgid "The :code:`SecAgg+` abstraction" @@ -1347,18 +1424,17 @@ msgstr "The :code:`SecAgg+` 추상화" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 msgid "" "In this implementation, each client will be assigned with a unique index " -"(int) for secure aggregation, and thus many python dictionaries used have " -"keys of int type rather than ClientProxy type." +"(int) for secure aggregation, and thus many python dictionaries used have" +" keys of int type rather than ClientProxy type." msgstr "" -"구현에서는 각 클라이언트에 secure aggregation를 위한 고유 인덱스(int)가 할당" -"되므로 사용되는 많은 파이썬 dictionaries에는 ClientProxy 타입이 아닌 int 타입" -"의 키가 있습니다." +"구현에서는 각 클라이언트에 secure aggregation를 위한 고유 인덱스(int)가 할당되므로 사용되는 많은 파이썬 " +"dictionaries에는 ClientProxy 타입이 아닌 int 타입의 키가 있습니다." #: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 #: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 msgid "" -"The Flower server will execute and process received results in the following " -"order:" +"The Flower server will execute and process received results in the " +"following order:" msgstr "Flower 서버는 수신된 결과를 다음 순서로 실행하고 처리합니다:" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 @@ -1375,20 +1451,18 @@ msgstr "GitHub에서 기여하기" #: ../../source/contributor-tutorial-contribute-on-github.rst:4 msgid "" -"This guide is for people who want to get involved with Flower, but who are " -"not used to contributing to GitHub projects." -msgstr "" -"이 가이드는 Flower에 참여하고 싶지만 GitHub 프로젝트에 기여하는 데 익숙하지 " -"않은 분들을 위한 것입니다." +"This guide is for people who want to get involved with Flower, but who " +"are not used to contributing to GitHub projects." +msgstr "이 가이드는 Flower에 참여하고 싶지만 GitHub 프로젝트에 기여하는 데 익숙하지 않은 분들을 위한 것입니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:6 msgid "" -"If you're familiar with how contributing on GitHub works, you can directly " -"checkout our :doc:`getting started guide for contributors `." +"If you're familiar with how contributing on GitHub works, you can " +"directly checkout our :doc:`getting started guide for contributors " +"`." msgstr "" -"깃허브에서 기여하는 방식에 익숙하다면 :doc:`기여자를 위한 시작 가이드" -"`를 직접 확인하세요." +"깃허브에서 기여하는 방식에 익숙하다면 :doc:`기여자를 위한 시작 가이드`를 직접 확인하세요." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:10 msgid "Setting up the repository" @@ -1403,41 +1477,37 @@ msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" -"Git은 분산 버전 관리 도구입니다. 이를 통해 전체 코드베이스의 히스토리와 모든 " -"개발자의 컴퓨터를 저장할 수 있습니다. 로컬 컴퓨터에 설치해야 하는 소프트웨어" -"로, 이 `가이드 `_를 따라 설정할 수 있습니다." +"Git은 분산 버전 관리 도구입니다. 이를 통해 전체 코드베이스의 히스토리와 모든 개발자의 컴퓨터를 저장할 수 있습니다. 로컬 " +"컴퓨터에 설치해야 하는 소프트웨어로, 이 `가이드 `_를 따라 설정할 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:16 msgid "" "GitHub, itself, is a code hosting platform for version control and " -"collaboration. It allows for everyone to collaborate and work from anywhere " -"on remote repositories." +"collaboration. It allows for everyone to collaborate and work from " +"anywhere on remote repositories." msgstr "" -"GitHub는 그 자체로 버전 관리 및 협업을 위한 코드 호스팅 플랫폼입니다. 누구나 " -"원격 레포지토리에서 어디서든 협업하고 작업할 수 있습니다." +"GitHub는 그 자체로 버전 관리 및 협업을 위한 코드 호스팅 플랫폼입니다. 누구나 원격 레포지토리에서 어디서든 협업하고 작업할 " +"수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:18 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." -msgstr "" -"아직 계정을 만들지 않았다면 `GitHub `_에서 계정을 " -"만들어야 합니다." +msgstr "아직 계정을 만들지 않았다면 `GitHub `_에서 계정을 만들어야 합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:20 msgid "" -"The idea behind the generic Git and GitHub workflow boils down to this: you " -"download code from a remote repository on GitHub, make changes locally and " -"keep track of them using Git and then you upload your new history back to " -"GitHub." +"The idea behind the generic Git and GitHub workflow boils down to this: " +"you download code from a remote repository on GitHub, make changes " +"locally and keep track of them using Git and then you upload your new " +"history back to GitHub." msgstr "" -"일반적인 Git 및 GitHub 워크플로우의 기본 개념은 다음과 같이 요약됩니다. " -"GitHub의 원격 레포지토리에서 코드를 다운로드하고 로컬에서 변경한 후 Git을 사" -"용하여 추적한 다음 새 기록을 다시 GitHub에 업로드하는 것입니다." +"일반적인 Git 및 GitHub 워크플로우의 기본 개념은 다음과 같이 요약됩니다. GitHub의 원격 레포지토리에서 코드를 " +"다운로드하고 로컬에서 변경한 후 Git을 사용하여 추적한 다음 새 기록을 다시 GitHub에 업로드하는 것입니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" @@ -1445,25 +1515,24 @@ msgstr "**Flower 레포지토리 포크하기**" #: ../../source/contributor-tutorial-contribute-on-github.rst:24 msgid "" -"A fork is a personal copy of a GitHub repository. To create one for Flower, " -"you must navigate to ``_ (while connected to " -"your GitHub account) and click the ``Fork`` button situated on the top right " -"of the page." +"A fork is a personal copy of a GitHub repository. To create one for " +"Flower, you must navigate to ``_ (while " +"connected to your GitHub account) and click the ``Fork`` button situated " +"on the top right of the page." msgstr "" "포크는 GitHub 리포지토리의 개인 복사본입니다. Flower용 포크를 만들려면 " -"``_로 이동하여(GitHub 계정에 연결된 상태에" -"서) 페이지 오른쪽 상단에 있는 ``포크`` 버튼을 클릭해야 합니다." +"``_로 이동하여(GitHub 계정에 연결된 상태에서) 페이지 오른쪽 " +"상단에 있는 ``포크`` 버튼을 클릭해야 합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "" "You can change the name if you want, but this is not necessary as this " -"version of Flower will be yours and will sit inside your own account (i.e., " -"in your own list of repositories). 
Once created, you should see on the top " -"left corner that you are looking at your own version of Flower." +"version of Flower will be yours and will sit inside your own account " +"(i.e., in your own list of repositories). Once created, you should see on" +" the top left corner that you are looking at your own version of Flower." msgstr "" -"원하는 경우 이름을 변경할 수 있지만, 이 버전의 Flower는 자신의 계정(즉, 자신" -"의 리포지토리 목록)에 위치하게 되므로 변경할 필요는 없습니다. 만들기가 완료되" -"면 왼쪽 상단에Flower 버전이 표시되는 것을 볼 수 있습니다." +"원하는 경우 이름을 변경할 수 있지만, 이 버전의 Flower는 자신의 계정(즉, 자신의 리포지토리 목록)에 위치하게 되므로 변경할" +" 필요는 없습니다. 만들기가 완료되면 왼쪽 상단에Flower 버전이 표시되는 것을 볼 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" @@ -1472,29 +1541,24 @@ msgstr "**포크된 레포지토리 클론하기**" #: ../../source/contributor-tutorial-contribute-on-github.rst:35 msgid "" "The next step is to download the forked repository on your machine to be " -"able to make changes to it. On your forked repository page, you should first " -"click on the ``Code`` button on the right, this will give you the ability to " -"copy the HTTPS link of the repository." +"able to make changes to it. On your forked repository page, you should " +"first click on the ``Code`` button on the right, this will give you the " +"ability to copy the HTTPS link of the repository." msgstr "" -"다음 단계는 컴퓨터에서 포크된 레포지토리를 변경할 수 있도록 다운로드하는 것입" -"니다. 포크된 포지토리 페이지에서 먼저 오른쪽의 ``Code`` 버튼을 클릭하면 레포" -"지토리의 HTTPS 링크를 복사할 수 있습니다." +"다음 단계는 컴퓨터에서 포크된 레포지토리를 변경할 수 있도록 다운로드하는 것입니다. 포크된 포지토리 페이지에서 먼저 오른쪽의 " +"``Code`` 버튼을 클릭하면 레포지토리의 HTTPS 링크를 복사할 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:41 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" -msgstr "" -"\\를 복사한 후에는 컴퓨터에서 터미널을 열고 레포지토리를 다운로드할 위" -"치로 이동하여 입력하면 됩니다:" +msgstr "\\를 복사한 후에는 컴퓨터에서 터미널을 열고 레포지토리를 다운로드할 위치로 이동하여 입력하면 됩니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "" -"This will create a ``flower/`` (or the name of your fork if you renamed it) " -"folder in the current working directory." -msgstr "" -"현재 작업 디렉터리에``flower/``(또는 포크 이름을 변경한 경우 포크 이름) 폴더" -"가 생성됩니다." +"This will create a ``flower/`` (or the name of your fork if you renamed " +"it) folder in the current working directory." +msgstr "현재 작업 디렉터리에``flower/``(또는 포크 이름을 변경한 경우 포크 이름) 폴더가 생성됩니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" @@ -1506,14 +1570,13 @@ msgstr "그런 다음 레포지토리 폴더로 이동할 수 있습니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:56 msgid "" -"And here we will need to add an origin to our repository. The origin is the " -"\\ of the remote fork repository. To obtain it, we can do as " -"previously mentioned by going to our fork repository on our GitHub account " -"and copying the link." +"And here we will need to add an origin to our repository. The origin is " +"the \\ of the remote fork repository. To obtain it, we can do as " +"previously mentioned by going to our fork repository on our GitHub " +"account and copying the link." msgstr "" -"여기에 레포지토리에 origin을 추가해야 합니다. origin은 원격 포크 레포지토리" -"의 \\입니다. origin을 얻으려면 앞서 설명한 대로 GitHub 계정의 포크 레" -"포지토리로 이동하여 링크를 복사하면 됩니다." +"여기에 레포지토리에 origin을 추가해야 합니다. origin은 원격 포크 레포지토리의 \\입니다. origin을 " +"얻으려면 앞서 설명한 대로 GitHub 계정의 포크 레포지토리로 이동하여 링크를 복사하면 됩니다." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:61 msgid "" @@ -1529,34 +1592,29 @@ msgstr "**Upstream 추가하기**" msgid "" "Now we will add an upstream address to our repository. Still in the same " "directory, we must run the following command:" -msgstr "" -"이제 레포지토리에 upstream 주소를 추가하겠습니다. 여전히 같은 디렉터리에서 다" -"음 명령을 실행해야 합니다:" +msgstr "이제 레포지토리에 upstream 주소를 추가하겠습니다. 여전히 같은 디렉터리에서 다음 명령을 실행해야 합니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:76 -msgid "" -"The following diagram visually explains what we did in the previous steps:" +msgid "The following diagram visually explains what we did in the previous steps:" msgstr "다음 다이어그램은 이전 단계에서 수행한 작업을 시각적으로 설명합니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:80 msgid "" -"The upstream is the GitHub remote address of the parent repository (in this " -"case Flower), i.e. the one we eventually want to contribute to and therefore " -"need an up-to-date history of. The origin is just the GitHub remote address " -"of the forked repository we created, i.e. the copy (fork) in our own account." +"The upstream is the GitHub remote address of the parent repository (in " +"this case Flower), i.e. the one we eventually want to contribute to and " +"therefore need an up-to-date history of. The origin is just the GitHub " +"remote address of the forked repository we created, i.e. the copy (fork) " +"in our own account." msgstr "" -"upstream은 부모 레포지토리(이 경우 Flower)의 GitHub 원격 주소, 즉 우리가 최종" -"적으로 기여하고 싶고 따라서 최신 기록이 필요한 레포지토리입니다. origin은 우" -"리가 만든 포크된 레포지토리의 GitHub 원격 주소, 즉 우리 계정에 있는 사본(포" -"크)입니다." +"upstream은 부모 레포지토리(이 경우 Flower)의 GitHub 원격 주소, 즉 우리가 최종적으로 기여하고 싶고 따라서 최신" +" 기록이 필요한 레포지토리입니다. origin은 우리가 만든 포크된 레포지토리의 GitHub 원격 주소, 즉 우리 계정에 있는 " +"사본(포크)입니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:84 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" -msgstr "" -"로컬 버전의 포크가 Flower 레포지토리의 최신 변경 사항으로 최신 상태인지 확인" -"하려면 다음 명령을 실행하면 됩니다:" +msgstr "로컬 버전의 포크가 Flower 레포지토리의 최신 변경 사항으로 최신 상태인지 확인하려면 다음 명령을 실행하면 됩니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:93 msgid "Setting up the coding environment" @@ -1565,13 +1623,13 @@ msgstr "코딩 환경 설정" #: ../../source/contributor-tutorial-contribute-on-github.rst:95 msgid "" "This can be achieved by following this :doc:`getting started guide for " -"contributors ` (note that " -"you won't need to clone the repository). Once you are able to write code and " -"test it, you can finally start making changes!" +"contributors ` (note " +"that you won't need to clone the repository). Once you are able to write " +"code and test it, you can finally start making changes!" msgstr "" -":doc:'기여자를 위한 시작 가이드 '를 참조하세요(레포지토리를 복제할 필요는 없습니다). 코드를 " -"작성하고 테스트할 수 있게 되면 드디어 변경을 시작할 수 있습니다!" +":doc:'기여자를 위한 시작 가이드 '를 참조하세요(레포지토리를 복제할 필요는 없습니다). 코드를 작성하고 테스트할 수 있게 되면 드디어" +" 변경을 시작할 수 있습니다!" 
#: ../../source/contributor-tutorial-contribute-on-github.rst:100 msgid "Making changes" @@ -1579,7 +1637,8 @@ msgstr "변경하기" #: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "" -"Before making any changes make sure you are up-to-date with your repository:" +"Before making any changes make sure you are up-to-date with your " +"repository:" msgstr "변경하기 전에 레포지토리를 최신 상태로 유지하세요:" #: ../../source/contributor-tutorial-contribute-on-github.rst:108 @@ -1592,15 +1651,15 @@ msgstr "**새 브랜치 만들기**" #: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" -"To make the history cleaner and easier to work with, it is good practice to " -"create a new branch for each feature/project that needs to be implemented." -msgstr "" -"히스토리를 더 깔끔하고 작업하기 쉽게 만들려면 구현해야 하는 각 기능/프로젝트" -"에 대해 새 브랜치를 만드는 것이 좋습니다." +"To make the history cleaner and easier to work with, it is good practice " +"to create a new branch for each feature/project that needs to be " +"implemented." +msgstr "히스토리를 더 깔끔하고 작업하기 쉽게 만들려면 구현해야 하는 각 기능/프로젝트에 대해 새 브랜치를 만드는 것이 좋습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:118 msgid "" -"To do so, just run the following command inside the repository's directory:" +"To do so, just run the following command inside the repository's " +"directory:" msgstr "이렇게 하려면 레포지토리 디렉토리에서 다음 명령을 실행하면 됩니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:125 @@ -1608,8 +1667,7 @@ msgid "**Make changes**" msgstr "**변경하기**" #: ../../source/contributor-tutorial-contribute-on-github.rst:125 -msgid "" -"Write great code and create wonderful changes using your favorite editor!" +msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "선호하는 편집기를 사용하여 멋진 코드를 작성하고 훌륭한 변화를 만들어 보세요!" #: ../../source/contributor-tutorial-contribute-on-github.rst:138 @@ -1618,13 +1676,12 @@ msgstr "**코드 테스트 및 서식 지정**" #: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" -"Don't forget to test and format your code! Otherwise your code won't be able " -"to be merged into the Flower repository. This is done so the codebase stays " -"consistent and easy to understand." +"Don't forget to test and format your code! Otherwise your code won't be " +"able to be merged into the Flower repository. This is done so the " +"codebase stays consistent and easy to understand." msgstr "" -"코드를 테스트하고 서식을 지정하는 것을 잊지 마세요! 그렇지 않으면 코드를 " -"Flower 레포지토리에 병합할 수 없습니다. 이는 코드베이스가 일관성을 유지하고 " -"이해하기 쉽도록 하기 위한 것입니다." +"코드를 테스트하고 서식을 지정하는 것을 잊지 마세요! 그렇지 않으면 코드를 Flower 레포지토리에 병합할 수 없습니다. 이는 " +"코드베이스가 일관성을 유지하고 이해하기 쉽도록 하기 위한 것입니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "To do so, we have written a few scripts that you can execute:" @@ -1632,15 +1689,13 @@ msgstr "이를 위해 실행할 수 있는 몇 가지 스크립트를 작성했 #: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" -msgstr "**Stage 변경**" +msgstr "**변경사항 스테이징**" #: ../../source/contributor-tutorial-contribute-on-github.rst:141 msgid "" -"Before creating a commit that will update your history, you must specify to " -"Git which files it needs to take into account." -msgstr "" -"기록을 업데이트할 커밋을 만들기 전에 어떤 파일을 고려해야 하는지 Git에 지정해" -"야 합니다." +"Before creating a commit that will update your history, you must specify " +"to Git which files it needs to take into account." +msgstr "기록을 업데이트할 커밋을 만들기 전에 어떤 파일을 고려해야 하는지 Git에 지정해야 합니다." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "This can be done with:" @@ -1648,34 +1703,31 @@ msgstr "이 작업을 수행할 수 있습니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "" -"To check which files have been modified compared to the last version (last " -"commit) and to see which files are staged for commit, you can use the :code:" -"`git status` command." +"To check which files have been modified compared to the last version " +"(last commit) and to see which files are staged for commit, you can use " +"the :code:`git status` command." msgstr "" -"마지막 버전(마지막 커밋)과 비교하여 수정된 파일을 확인하고 커밋을 위해 스테이" -"징된 파일을 확인하려면 :code:`git status` 명령을 사용하면 됩니다." +"마지막 버전(마지막 커밋)과 비교하여 수정된 파일을 확인하고 커밋을 위해 스테이징된 파일을 확인하려면 :code:`git " +"status` 명령을 사용하면 됩니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" -msgstr "**Commit 변경**" +msgstr "**변경사항 커밋**" #: ../../source/contributor-tutorial-contribute-on-github.rst:153 msgid "" "Once you have added all the files you wanted to commit using :code:`git " "add`, you can finally create your commit using this command:" -msgstr "" -":code:`git add`를 사용하여 커밋하려는 모든 파일을 추가한 후, 마지막으로 이 명" -"령을 사용하여 커밋을 생성할 수 있습니다:" +msgstr ":code:`git add`를 사용하여 커밋하려는 모든 파일을 추가한 후, 마지막으로 이 명령을 사용하여 커밋을 생성할 수 있습니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:159 msgid "" -"The \\ is there to explain to others what the commit does. " -"It should be written in an imperative style and be concise. An example would " -"be :code:`git commit -m \"Add images to README\"`." +"The \\ is there to explain to others what the commit " +"does. It should be written in an imperative style and be concise. An " +"example would be :code:`git commit -m \"Add images to README\"`." msgstr "" -"커밋의 내용을 다른 사람에게 설명하기 위해 \\가 있습니다. 명" -"령형 스타일로 작성해야 하며 간결해야 합니다. 예를 들면 :code:`git commit -m " -"\"Add images to README\"`." +"커밋의 내용을 다른 사람에게 설명하기 위해 \\가 있습니다. 명령형 스타일로 작성해야 하며 간결해야" +" 합니다. 예를 들면 :code:`git commit -m \"Add images to README\"`." #: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" @@ -1683,20 +1735,18 @@ msgstr "**변경 사항을 포크에 푸시**" #: ../../source/contributor-tutorial-contribute-on-github.rst:163 msgid "" -"Once we have committed our changes, we have effectively updated our local " -"history, but GitHub has no way of knowing this unless we push our changes to " -"our origin's remote address:" +"Once we have committed our changes, we have effectively updated our local" +" history, but GitHub has no way of knowing this unless we push our " +"changes to our origin's remote address:" msgstr "" -"변경 사항을 커밋하면 로컬 히스토리를 효과적으로 업데이트한 것이지만, 변경 사" -"항을 원본의 원격 주소로 푸시하지 않는 한 GitHub는 이를 알 방법이 없습니다:" +"변경 사항을 커밋하면 로컬 히스토리를 효과적으로 업데이트한 것이지만, 변경 사항을 원본의 원격 주소로 푸시하지 않는 한 " +"GitHub는 이를 알 방법이 없습니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:170 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." -msgstr "" -"이 작업이 완료되면 변경한 내용으로 포크된 레포지토리가 업데이트된 것을 GitHub" -"에서 확인할 수 있습니다." +msgstr "이 작업이 완료되면 변경한 내용으로 포크된 레포지토리가 업데이트된 것을 GitHub에서 확인할 수 있습니다." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:174 msgid "Creating and merging a pull request (PR)" @@ -1708,57 +1758,51 @@ msgstr "**PR 만들기**" #: ../../source/contributor-tutorial-contribute-on-github.rst:177 msgid "" -"Once you have pushed changes, on the GitHub webpage of your repository you " -"should see the following message:" -msgstr "" -"변경 사항을 푸시하고 나면 레포지토리의 GitHub 웹페이지에 다음 메시지가 표시됩" -"니다:" +"Once you have pushed changes, on the GitHub webpage of your repository " +"you should see the following message:" +msgstr "변경 사항을 푸시하고 나면 레포지토리의 GitHub 웹페이지에 다음 메시지가 표시됩니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:181 msgid "Otherwise you can always find this option in the ``Branches`` page." -msgstr "" -"그렇지 않으면 언제든지 ``Branches`` 페이지에서 이 옵션을 찾을 수 있습니다." +msgstr "그렇지 않으면 언제든지 ``Branches`` 페이지에서 이 옵션을 찾을 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:183 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" -msgstr "" -"``Compare & pull request`` 버튼을 클릭하면 이와 비슷한 화면이 표시됩니다:" +msgstr "``Compare & pull request`` 버튼을 클릭하면 이와 비슷한 화면이 표시됩니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:187 -msgid "" -"At the top you have an explanation of which branch will be merged where:" +msgid "At the top you have an explanation of which branch will be merged where:" msgstr "상단에는 어느 지점이 어디에 병합될 것인지에 대한 설명이 있습니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" -"In this example you can see that the request is to merge the branch ``doc-" -"fixes`` from my forked repository to branch ``main`` from the Flower " -"repository." +"In this example you can see that the request is to merge the branch " +"``doc-fixes`` from my forked repository to branch ``main`` from the " +"Flower repository." msgstr "" -"이 예제에서는 내 포크된 레포지토리의 ``doc-fixes`` 브랜치를 Flower 레포지토리" -"의 ``main`` 브랜치에 병합하라는 요청을 볼 수 있습니다." +"이 예제에서는 내 포크된 레포지토리의 ``doc-fixes`` 브랜치를 Flower 레포지토리의 ``main`` 브랜치에 병합하라는" +" 요청을 볼 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:193 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " -"guidelines, otherwise it won't be possible to merge the PR. So in this case, " -"a correct title might be ``docs(framework:skip) Fix typos``." +"guidelines, otherwise it won't be possible to merge the PR. So in this " +"case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -"제목은 :ref:`pr_title_format` 가이드라인을 준수하도록 변경해야 하며, 그렇지 " -"않으면 PR을 병합할 수 없습니다. 따라서 이 경우 올바른 제목은 " -"``docs(framework:skip) Fix typos``이 될 수 있습니다." +"제목은 :ref:`pr_title_format` 가이드라인을 준수하도록 변경해야 하며, 그렇지 않으면 PR을 병합할 수 없습니다. " +"따라서 이 경우 올바른 제목은 ``docs(framework:skip) Fix typos``이 될 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "" -"The input box in the middle is there for you to describe what your PR does " -"and to link it to existing issues. We have placed comments (that won't be " -"rendered once the PR is opened) to guide you through the process." +"The input box in the middle is there for you to describe what your PR " +"does and to link it to existing issues. We have placed comments (that " +"won't be rendered once the PR is opened) to guide you through the " +"process." msgstr "" -"가운데에 있는 입력 상자는 PR의 기능을 설명하고 기존 이슈에 연결할 수 있는 곳" -"입니다. 프로세스를 안내하기 위해 코멘트(PR이 열리면 렌더링되지 않음)를 배치했" -"습니다." +"가운데에 있는 입력 상자는 PR의 기능을 설명하고 기존 이슈에 연결할 수 있는 곳입니다. 프로세스를 안내하기 위해 코멘트(PR이 " +"열리면 렌더링되지 않음)를 배치했습니다." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:199 msgid "It is important to follow the instructions described in comments." @@ -1767,19 +1811,17 @@ msgstr "코멘트에 설명된 지침을 따르는 것이 중요합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:201 msgid "" "At the bottom you will find the button to open the PR. This will notify " -"reviewers that a new PR has been opened and that they should look over it to " -"merge or to request changes." +"reviewers that a new PR has been opened and that they should look over it" +" to merge or to request changes." msgstr "" -"하단에는 PR을 여는 버튼이 있습니다. 이렇게 하면 검토자에게 새 PR이 열렸으며 " -"병합하거나 변경을 요청하기 위해 검토해야 함을 알립니다." +"하단에는 PR을 여는 버튼이 있습니다. 이렇게 하면 검토자에게 새 PR이 열렸으며 병합하거나 변경을 요청하기 위해 검토해야 함을 " +"알립니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:204 msgid "" -"If your PR is not yet ready for review, and you don't want to notify anyone, " -"you have the option to create a draft pull request:" -msgstr "" -"PR이 아직 검토할 준비가 되지 않았고 다른 사람에게 알리고 싶지 않은 경우 pull " -"request 초안을 만드는 옵션이 있습니다:" +"If your PR is not yet ready for review, and you don't want to notify " +"anyone, you have the option to create a draft pull request:" +msgstr "PR이 아직 검토할 준비가 되지 않았고 다른 사람에게 알리고 싶지 않은 경우 pull request 초안을 만드는 옵션이 있습니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "**Making new changes**" @@ -1788,11 +1830,9 @@ msgstr "**new changes 만들기**" #: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "" "Once the PR has been opened (as draft or not), you can still push new " -"commits to it the same way we did before, by making changes to the branch " -"associated with the PR." -msgstr "" -"PR이 초안으로 열렸든 아니든, PR과 연결된 브랜치를 변경하여 이전과 같은 방식으" -"로 새 커밋을 푸시할 수 있습니다." +"commits to it the same way we did before, by making changes to the branch" +" associated with the PR." +msgstr "PR이 초안으로 열렸든 아니든, PR과 연결된 브랜치를 변경하여 이전과 같은 방식으로 새 커밋을 푸시할 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:231 msgid "**Review the PR**" @@ -1800,19 +1840,15 @@ msgstr "**PR 검토하기**" #: ../../source/contributor-tutorial-contribute-on-github.rst:212 msgid "" -"Once the PR has been opened or once the draft PR has been marked as ready, a " -"review from code owners will be automatically requested:" -msgstr "" -"PR이 열리거나 초안 PR이 준비됨으로 표시되면 코드 소유자의 검토가 자동으로 요" -"청됩니다:" +"Once the PR has been opened or once the draft PR has been marked as " +"ready, a review from code owners will be automatically requested:" +msgstr "PR이 열리거나 초안 PR이 준비됨으로 표시되면 코드 소유자의 검토가 자동으로 요청됩니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:216 msgid "" -"Code owners will then look into the code, ask questions, request changes or " -"validate the PR." -msgstr "" -"그러면 코드 소유자는 코드를 살펴보고, 질문하고, 변경을 요청하거나 PR의 유효성" -"을 검사합니다." +"Code owners will then look into the code, ask questions, request changes " +"or validate the PR." +msgstr "그러면 코드 소유자는 코드를 살펴보고, 질문하고, 변경을 요청하거나 PR의 유효성을 검사합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "Merging will be blocked if there are ongoing requested changes." @@ -1820,10 +1856,9 @@ msgstr "진행 중인 변경 요청이 있는 경우 병합이 차단됩니다." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:222 msgid "" -"To resolve them, just push the necessary changes to the branch associated " -"with the PR:" -msgstr "" -"이를 해결하려면 PR과 연결된 브랜치에 필요한 변경 사항을 푸시하면 됩니다:" +"To resolve them, just push the necessary changes to the branch associated" +" with the PR:" +msgstr "이를 해결하려면 PR과 연결된 브랜치에 필요한 변경 사항을 푸시하면 됩니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "And resolve the conversation:" @@ -1831,7 +1866,8 @@ msgstr "그리고 소통을 통해 해결하세요:" #: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "" -"Once all the conversations have been resolved, you can re-request a review." +"Once all the conversations have been resolved, you can re-request a " +"review." msgstr "모든 대화가 해결되면 검토를 다시 요청할 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:251 @@ -1840,19 +1876,15 @@ msgstr "**PR이 병합되면**" #: ../../source/contributor-tutorial-contribute-on-github.rst:234 msgid "" -"If all the automatic tests have passed and reviewers have no more changes to " -"request, they can approve the PR and merge it." -msgstr "" -"모든 자동 테스트가 통과되고 검토자가 더 이상 요청할 변경 사항이 없는 경우 PR" -"을 승인하고 병합할 수 있습니다." +"If all the automatic tests have passed and reviewers have no more changes" +" to request, they can approve the PR and merge it." +msgstr "모든 자동 테스트가 통과되고 검토자가 더 이상 요청할 변경 사항이 없는 경우 PR을 승인하고 병합할 수 있습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" -msgstr "" -"병합이 완료되면 GitHub에서 브랜치를 삭제할 수 있으며(삭제 버튼이 표시되어야 " -"함), 로컬에서도 삭제할 수 있습니다:" +msgstr "병합이 완료되면 GitHub에서 브랜치를 삭제할 수 있으며(삭제 버튼이 표시되어야 함), 로컬에서도 삭제할 수 있습니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "Then you should update your forked repository by doing:" @@ -1868,43 +1900,37 @@ msgstr "문제" #: ../../source/contributor-tutorial-contribute-on-github.rst:259 msgid "" -"For our documentation, we've started to use the `Diàtaxis framework `_." -msgstr "" -"저희 문서에는 'Diàtaxis 프레임워크 `_'를 사용하기 시작" -"했습니다." +"For our documentation, we've started to use the `Diàtaxis framework " +"`_." +msgstr "저희 문서에는 'Diàtaxis 프레임워크 `_'를 사용하기 시작했습니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" -"Our \"How to\" guides should have titles that continue the sentence \"How to " -"…\", for example, \"How to upgrade to Flower 1.0\"." +"Our \"How to\" guides should have titles that continue the sentence \"How" +" to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" -"'How to' 가이드의 제목은 \"How to …\"라는 문장을 이어가는 제목이어야 합니다" -"(예: \"How to upgrade to Flower 1.0\")." +"'How to' 가이드의 제목은 \"How to …\"라는 문장을 이어가는 제목이어야 합니다(예: \"How to upgrade " +"to Flower 1.0\")." #: ../../source/contributor-tutorial-contribute-on-github.rst:263 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." -msgstr "" -"대부분의 가이드는 아직 이 새로운 형식을 따르지 않으며, 안타깝게도 제목을 변경" -"하는 작업은 생각보다 복잡합니다." +msgstr "대부분의 가이드는 아직 이 새로운 형식을 따르지 않으며, 안타깝게도 제목을 변경하는 작업은 생각보다 복잡합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:265 msgid "" -"This issue is about changing the title of a doc from present continuous to " -"present simple." -msgstr "" -"이번 이슈는 문서 제목을 현재 연속형에서 현재 단순형으로 변경하는 것에 관한 것" -"입니다." +"This issue is about changing the title of a doc from present continuous " +"to present simple." 
+msgstr "이번 이슈는 문서 제목을 현재 연속형에서 현재 단순형으로 변경하는 것에 관한 것입니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:267 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "" -"\"How to saving progress\"을 \"How to save progress\"으로 변경한 예를 들어 보" -"겠습니다. 이것이 우리의 점검을 통과했나요?" +"\"How to saving progress\"을 \"How to save progress\"으로 변경한 예를 들어 보겠습니다. " +"이것이 우리의 점검을 통과했나요?" #: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Before: \"How to saving progress\" ❌" @@ -1923,8 +1949,8 @@ msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" msgstr "" -"이것은 사소한 변경이지만 end-to-end 설정을 테스트할 수 있습니다. Flower " -"레포지토리를 복제하고 설정한 후에는 다음과 같이 하세요:" +"이것은 사소한 변경이지만 end-to-end 설정을 테스트할 수 있습니다. Flower 레포지토리를 복제하고 설정한 후에는 다음과 " +"같이 하세요:" #: ../../source/contributor-tutorial-contribute-on-github.rst:278 msgid "Find the source file in ``doc/source``" @@ -1934,17 +1960,15 @@ msgstr "``doc/source``에서 소스 파일을 찾습니다" msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" -msgstr "" -"``.rst`` 파일에서 변경합니다(제목 아래의 대시는 제목 자체의 길이와 같아야 합" -"니다)" +msgstr "``.rst`` 파일에서 변경합니다(제목 아래의 대시는 제목 자체의 길이와 같아야 합니다)" #: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "" -"Build the docs and `check the result `_" +"Build the docs and `check the result `_" msgstr "" -"문서를 빌드하고 '결과 확인 `_'합니다" +"문서를 빌드하고 '결과 확인 `_'합니다" #: ../../source/contributor-tutorial-contribute-on-github.rst:283 msgid "Rename file" @@ -1952,14 +1976,13 @@ msgstr "파일 이름 바꾸기" #: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" -"You might have noticed that the file name still reflects the old wording. If " -"we just change the file, then we break all existing links to it - it is " -"**very important** to avoid that, breaking links can harm our search engine " -"ranking." +"You might have noticed that the file name still reflects the old wording." +" If we just change the file, then we break all existing links to it - it " +"is **very important** to avoid that, breaking links can harm our search " +"engine ranking." msgstr "" -"파일 이름에 여전히 이전 문구가 반영되어 있는 것을 보셨을 것입니다. 파일만 변" -"경하면 파일에 대한 기존 링크가 모두 끊어지는데, 링크를 끊으면 검색 엔진 순위" -"에 영향을 줄 수 있으므로 이를 방지하는 것이 **매우 중요**합니다." +"파일 이름에 여전히 이전 문구가 반영되어 있는 것을 보셨을 것입니다. 파일만 변경하면 파일에 대한 기존 링크가 모두 끊어지는데, " +"링크를 끊으면 검색 엔진 순위에 영향을 줄 수 있으므로 이를 방지하는 것이 **매우 중요**합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "Here's how to change the file name:" @@ -1975,11 +1998,11 @@ msgstr "'doc/source/conf.py'에 리디렉션 규칙을 추가합니다" #: ../../source/contributor-tutorial-contribute-on-github.rst:293 msgid "" -"This will cause a redirect from ``saving-progress.html`` to ``save-progress." -"html``, old links will continue to work." +"This will cause a redirect from ``saving-progress.html`` to ``save-" +"progress.html``, old links will continue to work." msgstr "" -"이렇게 하면 ``saving-progress.html``에서 ``save-progress.html``로 리디렉션되" -"며, 이전 링크는 계속 작동합니다." +"이렇게 하면 ``saving-progress.html``에서 ``save-progress.html``로 리디렉션되며, 이전 링크는 " +"계속 작동합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:296 msgid "Apply changes in the index file" @@ -1991,8 +2014,8 @@ msgid "" "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." 
msgstr "" -"횡방향 내비게이션 바가 제대로 작동하려면 ``index.rst`` 파일도 업데이트하는 것" -"이 매우 중요합니다. 이 파일은 탐색 모음의 전체 배열을 정의하는 곳입니다." +"횡방향 내비게이션 바가 제대로 작동하려면 ``index.rst`` 파일도 업데이트하는 것이 매우 중요합니다. 이 파일은 탐색 모음의" +" 전체 배열을 정의하는 곳입니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:301 msgid "Find and modify the file name in ``index.rst``" @@ -2004,11 +2027,9 @@ msgstr "PR 열기" #: ../../source/contributor-tutorial-contribute-on-github.rst:306 msgid "" -"Commit the changes (commit messages are always imperative: \"Do something\", " -"in this case \"Change …\")" -msgstr "" -"변경 사항을 커밋합니다(커밋 메시지는 항상 필수 메시지입니다:\"Do " -"something\"(이 경우 는 \"Change …\" )" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" +msgstr "변경 사항을 커밋합니다(커밋 메시지는 항상 필수 메시지입니다:\"Do something\"(이 경우 는 \"Change …\" )" #: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Push the changes to your fork" @@ -2016,10 +2037,9 @@ msgstr "변경 사항을 포크에 푸시합니다" #: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "" -"Open a PR (as shown above) with title ``docs(framework) Update how-to guide " -"title``" -msgstr "" -"``docs(framework) Update how-to guide title`` 제목으로 PR(위와 같이)을 엽니다" +"Open a PR (as shown above) with title ``docs(framework) Update how-to " +"guide title``" +msgstr "``docs(framework) Update how-to guide title`` 제목으로 PR(위와 같이)을 엽니다" #: ../../source/contributor-tutorial-contribute-on-github.rst:309 msgid "Wait for it to be approved!" @@ -2030,27 +2050,28 @@ msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "축하합니다! 이제 공식적으로 Flower 기여자가 되셨습니다!" #: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "다음 단계" #: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" -"Once you have made your first PR, and want to contribute more, be sure to " -"check out the following :" +"Once you have made your first PR, and want to contribute more, be sure to" +" check out the following :" msgstr "첫 번째 PR을 작성하고 더 많은 기여를 하고 싶다면 다음을 확인하세요:" #: ../../source/contributor-tutorial-contribute-on-github.rst:318 msgid "" -":doc:`Good first contributions `, " -"where you should particularly look into the :code:`baselines` contributions." +":doc:`Good first contributions `, where you should particularly look into the " +":code:`baselines` contributions." msgstr "" -":doc:`훌륭한 첫 번째 기여 `, 특히 :" -"code:`baselines` 기여를 살펴봐야 합니다." +":doc:`훌륭한 첫 번째 기여 `, 특히 " +":code:`baselines` 기여를 살펴봐야 합니다." 
#: ../../source/contributor-tutorial-contribute-on-github.rst:322 #: ../../source/fed/0000-20200102-fed-template.md:60 @@ -2067,23 +2088,22 @@ msgstr "다음과 같은 PR 제목 형식을 적용합니다:" #: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "" -"(or ``(:skip) `` to ignore the PR in the changelog)" -msgstr "" -"(또는 ``(:skip) ``를 사용하면 변경 로그에서 PR을 무시" -"합니다.)" +"(or ``(:skip) `` to ignore the PR in the " +"changelog)" +msgstr "(또는 ``(:skip) ``를 사용하면 변경 로그에서 PR을 무시합니다.)" #: ../../source/contributor-tutorial-contribute-on-github.rst:337 msgid "" -"Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, " -"```` should be in ``{framework, baselines, datasets, examples, or " -"'*' when modifying multiple projects which requires the ':skip' flag to be " -"used}``, and ```` starts with a capitalised verb in the imperative " -"mood." +"Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " +"break}``, ```` should be in ``{framework, baselines, datasets, " +"examples, or '*' when modifying multiple projects which requires the " +"':skip' flag to be used}``, and ```` starts with a capitalised " +"verb in the imperative mood." msgstr "" -"여기서 ````은 ``{ci, fix, feat, docs, refactor, break}``, ````" -"는 ``{framework, baselines, datasets, examples, or '*' ':skip' 플래그를 사용" -"해야 하는 여러 프로젝트를 수정하는 경우}``로 입력해야 하며, ````는 " -"대문자로 시작해야 합니다." +"여기서 ````은 ``{ci, fix, feat, docs, refactor, break}``, " +"````는 ``{framework, baselines, datasets, examples, or '*' " +"':skip' 플래그를 사용해야 하는 여러 프로젝트를 수정하는 경우}``로 입력해야 하며, ````는 대문자로 " +"시작해야 합니다." #: ../../source/contributor-tutorial-contribute-on-github.rst:341 msgid "Valid examples:" @@ -2113,8 +2133,7 @@ msgstr "``feat(framework): Add flwr build CLI command`` ( ``:``제외)" msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" -msgstr "" -"``feat(*) Add flwr build CLI command`` (``skip`` flag와 함께 ``*``누락)" +msgstr "``feat(*) Add flwr build CLI command`` (``skip`` flag와 함께 ``*``누락)" #: ../../source/contributor-tutorial-contribute-on-github.rst:351 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" @@ -2122,8 +2141,7 @@ msgstr "``feat(skip) Add flwr build CLI command`` (````누락)" #: ../../source/contributor-tutorial-contribute-on-github.rst:352 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" -msgstr "" -"``feat(framework) add flwr build CLI command`` (대문자로 표기되지 않은 동사)" +msgstr "``feat(framework) add flwr build CLI command`` (대문자로 표기되지 않은 동사)" #: ../../source/contributor-tutorial-contribute-on-github.rst:353 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" @@ -2138,13 +2156,16 @@ msgid "Get started as a contributor" msgstr "기여자로 시작하기" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "전제 조건" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.8 `_ or above" -msgstr "Python 3.8 `_ 이상" +msgid "`Python 3.9 `_ or above" +msgstr "Python 3.9 `_ 이상" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2155,27 +2176,26 @@ msgid "(Optional) `pyenv `_" msgstr "(선택 사항) `pyenv `_" #: 
../../source/contributor-tutorial-get-started-as-a-contributor.rst:10 -msgid "" -"(Optional) `pyenv-virtualenv `_" -msgstr "" -"(선택 사항) `pyenv-virtualenv `_" +msgid "(Optional) `pyenv-virtualenv `_" +msgstr "(선택 사항) `pyenv-virtualenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 msgid "" "Flower uses :code:`pyproject.toml` to manage dependencies and configure " -"development tools (the ones which support it). Poetry is a build tool which " -"supports `PEP 517 `_." +"development tools (the ones which support it). Poetry is a build tool " +"which supports `PEP 517 `_." msgstr "" -"Flower는 dependencies을 관리하고 개발 도구(이를 지원하는 도구)를 구성하기 위" -"해 :code:`pyproject.toml`을 사용합니다. Poetry는 `PEP 517 `_을 지원하는 빌드 도구입니다." +"Flower는 dependencies을 관리하고 개발 도구(이를 지원하는 도구)를 구성하기 위해 " +":code:`pyproject.toml`을 사용합니다. Poetry는 `PEP 517 " +"`_을 지원하는 빌드 도구입니다." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 msgid "Developer Machine Setup" msgstr "개발자 머신 설정" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" +#, fuzzy +msgid "Preliminaries" msgstr "사전 준비" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -2188,18 +2208,15 @@ msgstr "macOS의 경우" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:27 msgid "" -"Install `homebrew `_. Don't forget the post-installation " -"actions to add `brew` to your PATH." -msgstr "" -"`homebrew `_를 설치합니다. 설치 후 `brew`를 PATH에 추가하" -"는 작업을 잊지 마세요." +"Install `homebrew `_. Don't forget the post-" +"installation actions to add `brew` to your PATH." +msgstr "`homebrew `_를 설치합니다. 설치 후 `brew`를 PATH에 추가하는 작업을 잊지 마세요." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 msgid "" -"Install `xz` (to install different Python versions) and `pandoc` to build " -"the docs::" -msgstr "" -"xz`(다른 Python 버전을 설치하려면)와 `pandoc`을 설치하여 문서를 빌드합니다::" +"Install `xz` (to install different Python versions) and `pandoc` to build" +" the docs::" +msgstr "xz`(다른 Python 버전을 설치하려면)와 `pandoc`을 설치하여 문서를 빌드합니다::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 msgid "For Ubuntu" @@ -2207,11 +2224,9 @@ msgstr "Ubuntu의 경우" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 msgid "" -"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all necessary " -"packages::" -msgstr "" -"시스템(우분투 22.04 이상)이 최신 상태이고 필요한 패키지가 모두 설치되어 있는" -"지 확인하세요:" +"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " +"necessary packages::" +msgstr "시스템(우분투 22.04 이상)이 최신 상태이고 필요한 패키지가 모두 설치되어 있는지 확인하세요:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 msgid "Create Flower Dev Environment" @@ -2221,49 +2236,45 @@ msgstr "Flower 개발 환경 만들기" msgid "" "1. Clone the `Flower repository `_ from " "GitHub::" -msgstr "" -"1. GitHub: 에서 ``Flower 레포지토리 `_를 복제" -"합니다::" +msgstr "1. GitHub: 에서 ``Flower 레포지토리 `_를 복제합니다::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 msgid "" -"Let's create the Python environment for all-things Flower. If you wish to " -"use :code:`pyenv`, we provide two convenience scripts that you can use. If " -"you prefer using something else than :code:`pyenv`, create a new " +"Let's create the Python environment for all-things Flower. If you wish to" +" use :code:`pyenv`, we provide two convenience scripts that you can use. 
" +"If you prefer using something else than :code:`pyenv`, create a new " "environment, activate and skip to the last point where all packages are " "installed." msgstr "" -"Flower의 모든 것을 위한 파이썬 환경을 만들어 보겠습니다.:code:`pyenv`를 사용" -"하고자 하는 경우 사용할 수 있는 두 가지 편의 스크립트를 제공합니다.:code:" -"`pyenv`가 아닌 다른 것을 사용하려면 새 환경을 생성하고 활성화한 후 모든 패키" -"지가 설치된 마지막 지점으로 건너뛰세요." +"Flower의 모든 것을 위한 파이썬 환경을 만들어 보겠습니다.:code:`pyenv`를 사용하고자 하는 경우 사용할 수 있는 두 " +"가지 편의 스크립트를 제공합니다.:code:`pyenv`가 아닌 다른 것을 사용하려면 새 환경을 생성하고 활성화한 후 모든 패키지가" +" 설치된 마지막 지점으로 건너뛰세요." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 msgid "" -"If you don't have :code:`pyenv` installed, the following script that will " -"install it, set it up, and create the virtual environment (with :code:" -"`Python 3.8.17` by default)::" +"If you don't have :code:`pyenv` installed, the following script that will" +" install it, set it up, and create the virtual environment (with " +":code:`Python 3.9.20` by default)::" msgstr "" -":code:`pyenv`가 설치되어 있지 않은 경우 다음 스크립트를 사용하여 설치, 설정 " -"및 가상 환경을 생성합니다(기본적으로 :code:`Python 3.8.17` 사용):" +":code:`pyenv`가 설치되어 있지 않은 경우 다음 스크립트를 사용하여 설치, 설정 및 가상 환경을 생성합니다(기본적으로 " +":code:`Python 3.9.20` 사용):" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 msgid "" "If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with :" -"code:`Python 3.8.17` by default)::" +"virtualenv` plugin), you can use the following convenience script (with " +":code:`Python 3.9.20` by default)::" msgstr "" -":code:`pyenv`가 이미 설치되어 있는 경우( :code:`pyenv-virtualenv` 플러그인과 " -"함께) 다음과 같은 편의 스크립트를 사용할 수 있습니다(기본적으로 코드:`Python " -"3.8.17` 사용):" +":code:`pyenv`가 이미 설치되어 있는 경우( :code:`pyenv-virtualenv` 플러그인과 함께) 다음과 같은 " +"편의 스크립트를 사용할 수 있습니다(기본적으로 코드:`Python 3.9.20` 사용):" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 msgid "" -"3. Install the Flower package in development mode (think :code:`pip install -" -"e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think :code:`pip " +"install -e`) along with all necessary dependencies::" msgstr "" -"3. 필요한 모든 dependencies와 함께 개발 모드에서 Flower 패키지를 설치합니다" -"(예:code:`pip install -e`)::" +"3. 필요한 모든 dependencies와 함께 개발 모드에서 Flower 패키지를 설치합니다(예:code:`pip install " +"-e`)::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 msgid "Convenience Scripts" @@ -2272,13 +2283,12 @@ msgstr "편의 스크립트" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the :code:`/" -"dev` subdirectory for a full list. The following scripts are amongst the " -"most important ones:" +"recurring development tasks easier and less error-prone. See the " +":code:`/dev` subdirectory for a full list. The following scripts are " +"amongst the most important ones:" msgstr "" -"Flower 레포지토리에는 반복적인 개발 작업을 더 쉽고 오류를 줄이기 위한 여러 가" -"지 편의 스크립트가 포함되어 있습니다. 전체 목록은 :code:`/dev` 하위 디렉터리" -"를 참조하세요. 다음 스크립트는 가장 중요한 스크립트 중 하나입니다:" +"Flower 레포지토리에는 반복적인 개발 작업을 더 쉽고 오류를 줄이기 위한 여러 가지 편의 스크립트가 포함되어 있습니다. 전체 " +"목록은 :code:`/dev` 하위 디렉터리를 참조하세요. 
다음 스크립트는 가장 중요한 스크립트 중 하나입니다:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 msgid "Create/Delete Virtual Environment" @@ -2302,54 +2312,47 @@ msgstr "사전 커밋 훅 추가" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "" -"Developers may integrate a pre-commit hook into their workflow utilizing the " -"`pre-commit `_ library. The pre-commit hook " -"is configured to execute two primary operations: ``./dev/format.sh`` and ``./" -"dev/test.sh`` scripts." +"Developers may integrate a pre-commit hook into their workflow utilizing " +"the `pre-commit `_ library. The pre-" +"commit hook is configured to execute two primary operations: " +"``./dev/format.sh`` and ``./dev/test.sh`` scripts." msgstr "" -"개발자는 `pre-commit `_ 라이브러리를 사용하" -"여 사전 커밋 훅을 워크플로에 통합할 수 있습니다. 사전 커밋 훅은 두 가지 기본 " -"작업을 실행하도록 구성됩니다:``./dev/format.sh`` 및 ``./dev/test.sh`` 스크립" -"트." +"개발자는 `pre-commit `_ 라이브러리를 사용하여 사전 커밋 훅을" +" 워크플로에 통합할 수 있습니다. 사전 커밋 훅은 두 가지 기본 작업을 실행하도록 구성됩니다:``./dev/format.sh`` 및" +" ``./dev/test.sh`` 스크립트." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 msgid "There are multiple ways developers can use this:" msgstr "개발자가 이것을 사용할 수 있는 여러가지 방법이 있습니다:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 -msgid "" -"Install the pre-commit hook to your local git directory by simply running:" +msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "간단하게 실행하여 로컬 git 디렉터리에 사전 커밋 훅을 설치하세요:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 msgid "" -"Each ``git commit`` will trigger the execution of formatting and linting/" -"test scripts." +"Each ``git commit`` will trigger the execution of formatting and " +"linting/test scripts." msgstr "각 ``git 커밋``은 포맷 및 린팅/테스트 스크립트의 실행을 트리거합니다." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 msgid "" -"If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` " -"command. ::" -msgstr "" -"급한 경우 ``git commit`` 명령과 함께 `--no-verify``를 사용하여 훅을 넘기세요:" +"If in a hurry, bypass the hook using ``--no-verify`` with the ``git " +"commit`` command. ::" +msgstr "급한 경우 ``git commit`` 명령과 함께 `--no-verify``를 사용하여 훅을 넘기세요:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 msgid "" "For developers who prefer not to install the hook permanently, it is " -"possible to execute a one-time check prior to committing changes by using " -"the following command:" -msgstr "" -"훅을 영구적으로 설치하지 않으려는 개발자의 경우 다음 명령을 사용하여 변경 사" -"항을 커밋하기 전에 일회성 검사를 실행할 수 있습니다:" +"possible to execute a one-time check prior to committing changes by using" +" the following command:" +msgstr "훅을 영구적으로 설치하지 않으려는 개발자의 경우 다음 명령을 사용하여 변경 사항을 커밋하기 전에 일회성 검사를 실행할 수 있습니다:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." -msgstr "" -"이렇게 하면 ``git commit``의 기본 동작을 수정하지 않고 모든 파일에 대해 포맷 " -"및 린팅 검사/테스트를 실행합니다." +msgstr "이렇게 하면 ``git commit``의 기본 동작을 수정하지 않고 모든 파일에 대해 포맷 및 린팅 검사/테스트를 실행합니다." 
#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 msgid "Run Github Actions (CI) locally" @@ -2357,22 +2360,20 @@ msgstr "로컬에서 Github Action(CI) 실행하기" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 msgid "" -"Developers could run the full set of Github Actions workflows under their " -"local environment by using `Act `_. Please " -"refer to the installation instructions under the linked repository and run " -"the next command under Flower main cloned repository folder::" +"Developers could run the full set of Github Actions workflows under their" +" local environment by using `Act `_. " +"Please refer to the installation instructions under the linked repository" +" and run the next command under Flower main cloned repository folder::" msgstr "" -"개발자는 `Act `_를 사용하여 로컬 환경에서 전" -"체 Github Actions 워크플로우 세트를 실행할 수 있습니다. 링크된 레포지토리 아" -"래의 설치 지침을 참조하여 Flower 메인 클론 레포지토리 폴더 아래에서 다음 명령" -"을 실행하세요::" +"개발자는 `Act `_를 사용하여 로컬 환경에서 전체 Github " +"Actions 워크플로우 세트를 실행할 수 있습니다. 링크된 레포지토리 아래의 설치 지침을 참조하여 Flower 메인 클론 " +"레포지토리 폴더 아래에서 다음 명령을 실행하세요::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." -msgstr "" -"Flower 기본 워크플로우는 아래에 필요한 Docker 머신을 설정하여 실행합니다." +msgstr "Flower 기본 워크플로우는 아래에 필요한 Docker 머신을 설정하여 실행합니다." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 msgid "Build Release" @@ -2380,19 +2381,15 @@ msgstr "릴리즈 빌드" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 msgid "" -"Flower uses Poetry to build releases. The necessary command is wrapped in a " -"simple script::" -msgstr "" -"Flower는 Poetry를 사용하여 릴리즈를 빌드합니다. 필요한 명령은 간단한 스크립트" -"로 래핑됩니다::" +"Flower uses Poetry to build releases. The necessary command is wrapped in" +" a simple script::" +msgstr "Flower는 Poetry를 사용하여 릴리즈를 빌드합니다. 필요한 명령은 간단한 스크립트로 래핑됩니다::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in " -"the :code:`/dist` subdirectory." -msgstr "" -"결과물인 :code:`.whl` 및 :code:`.tar.gz` 릴리즈는 :code:`/dist` 하위 디렉터리" -"에 저장됩니다." +"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" +" the :code:`/dist` subdirectory." +msgstr "결과물인 :code:`.whl` 및 :code:`.tar.gz` 릴리즈는 :code:`/dist` 하위 디렉터리에 저장됩니다." #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 msgid "Build Documentation" @@ -2400,20594 +2397,25727 @@ msgstr "문서 빌드" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 msgid "" -"Flower's documentation uses `Sphinx `_. There's " -"no convenience script to re-build the documentation yet, but it's pretty " -"easy::" +"Flower's documentation uses `Sphinx `_. " +"There's no convenience script to re-build the documentation yet, but it's" +" pretty easy::" msgstr "" -"Flower의 문서는 `Sphinx `_를 사용합니다. 아직 문" -"서를 다시 작성할 수 있는 편리한 스크립트는 없지만 다음과 같이 쉽게 작성할 수 " -"있습니다:" +"Flower의 문서는 `Sphinx `_를 사용합니다. 아직 문서를 다시 작성할" +" 수 있는 편리한 스크립트는 없지만 다음과 같이 쉽게 작성할 수 있습니다:" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "그러면 ``doc/build/html``에 HTML 문서가 생성됩니다." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "예시: PyTorch에서 FedBN - 중앙 집중식에서 연합식으로" +#: ../../source/docker/enable-tls.rst:2 +#, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "보안 연결을 위한 SSL 사용 설정" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated version " -"of an existing machine learning workload with `FedBN `_, a federated training strategy designed for non-iid data. We " -"are using PyTorch to train a Convolutional Neural Network(with Batch " -"Normalization layers) on the CIFAR-10 dataset. When applying FedBN, only few " -"changes needed compared to :doc:`Example: PyTorch - From Centralized To " -"Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -"이 튜토리얼에서는 non-iid data를 위해 설계된 federated 훈련 전략인 `FedBN " -"`_으로 기존 머신러닝 워크로드의 federated " -"버전을 구축하기 위해 Flower를 사용하는 방법을 보여드립니다. 우리는 PyTorch를 " -"사용하여 CIFAR-10 데이터 세트에서 컨볼루션 신경망(일괄 정규화 레이어 포함)을 " -"훈련하고 있습니다. FedBN을 적용할 때, :doc:`예제: 파이토치 -중앙 집중식에서 " -"연합식으로 ` 와 비교했을 때 " -"몇 가지 사항만 변경 하면 됩니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "중앙 집중식 훈련" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:7 +#, fuzzy msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized To " -"Federated `. The only thing " -"to do is modifying the file called :code:`cifar.py`, revised part is shown " -"below:" -msgstr "" -"모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 연합식으로 `를 기반으로 수정합니다. :code:`cifar." -"py`라는 파일을 수정하기만 하면 되며, 수정된 부분은 아래와 같습니다:" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." +msgstr "SSL을 사용하려면 PEM으로 인코딩된 루트 인증서, PEM으로 인코딩된 개인 키 및 PEM으로 인코딩된 인증서 체인이 필요합니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:12 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -"Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니" -"다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" -msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" +"테스트 목적으로 자체 서명된 인증서를 생성할 수 있습니다. 'SSL 연결 사용 " +"`__ 페이지에 프로세스를 안내하는 섹션이 있습니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:17 +#, fuzzy msgid "" -"So far this should all look fairly familiar if you've used PyTorch before. " -"Let's take the next step and use what we've built to create a federated " -"learning system within FedBN, the system consists of one server and two " -"clients." 
+"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" -"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. " -"다음 단계로 넘어가서 우리가 구축한 것을 사용하여 FedBN 내에서 하나의 서버와 " -"두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 보겠습니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "연합 훈련" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 -msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are " -"easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the :doc:" -"`Example: PyTorch - From Centralized To Federated `. first." -msgstr "" -":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 읽었다면, 다음 부분은 쉽게 따라할 수 있으며 " -":code:`client.py`의 :code:`get_parameters`와 :code:`set_parameters` 함수만 " -"수정해야 합니다. 그렇지 않은 경우 :doc:`예제: 파이토치 - 중앙 집중식에서 " -"연합식으로 `를 먼저 " -"읽어보세요." +"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트된 파일과 디렉터리에 사용자 ID " +"``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " +"사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst:20 +#, fuzzy msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, :code:" -"`server.py` keeps unchanged, we can start the server directly." +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. FedBN에서 :code:" -"`server.py`는 변경되지 않고 그대로 유지되므로 서버를 바로 시작할 수 있습니다." +"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트된 파일과 디렉터리에 사용자 ID " +"``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " +"사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:14 msgid "" -"Finally, we will revise our *client* logic by changing :code:" -"`get_parameters` and :code:`set_parameters` in :code:`client.py`, we will " -"exclude batch normalization parameters from model parameter list when " -"sending to or receiving from the server." +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." msgstr "" -"마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 :code:" -"`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 " -"서버에서 받을 때 모델 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습" -"니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" +#: ../../source/docker/enable-tls.rst:27 +#, fuzzy +msgid "SuperLink" +msgstr "flower 초연결" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst:29 msgid "" -"in each window (make sure that the server is still running before you do so) " -"and see your (previously centralized) PyTorch project run federated learning " -"with FedBN strategy across two clients. 
Congratulations!" +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -"를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 확인하세요), (이전에 중앙 " -"집중된) PyTorch 프로젝트가 두 클라이언트에서 FedBN으로 연합 학습을 실행하는 " -"것을 확인합니다. 축하합니다!" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "다음 단계" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 -msgid "" -"The full source code for this example can be found `here `_. Our " -"example is of course somewhat over-simplified because both clients load the " -"exact same dataset, which isn't realistic. You're now prepared to explore " -"this topic further. How about using different subsets of CIFAR-10 on each " -"client? How about adding more clients?" -msgstr "" -"이 예제의 전체 소스 코드는 '여기 `_'에서 확인할 수 있습니다. 물" -"론 이 예제는 두 클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 다" -"소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 주제를 더 자" -"세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 CIFAR-10의 하위 집" -"합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것은 어떨까요?" - -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "예시: JAX - JAX Federated 실행" - -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 -msgid "" -"This tutorial will show you how to use Flower to build a federated version " -"of an existing JAX workload. We are using JAX to train a linear regression " -"model on a scikit-learn dataset. We will structure the example similar to " -"our `PyTorch - From Centralized To Federated `_ walkthrough. " -"First, we build a centralized training approach based on the `Linear " -"Regression with JAX `_ tutorial`. Then, we build upon the centralized " -"training code to run the training in a federated fashion." -msgstr "" -"이 튜토리얼에서는 Flower를 사용하여 기존 JAX 워크로드의 연합 버전을 구축하는 " -"방법을 보여드립니다. JAX를 사용해 scikit-learn 데이터 세트에서 선형 회귀 " -"모델을 훈련하고 있습니다. 예제는 '파이토치 - Centralized에서 Federated으로 " -"`_ 워크스루와 유사하게 구성하겠습니다. 먼저, `JAX를 사용한 선형 " -"회귀 `_ 튜토리얼`을 기반으로 centralized 학습 접근 방식을 구축합니다. 그런 " -"다음 centralized 트레이닝 코드를 기반으로 federated 방식으로 트레이닝을 " -"실행합니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 -msgid "" -"Before we start building our JAX example, we need install the packages :code:" -"`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +#: ../../source/docker/enable-tls.rst +msgid "Understanding the command" msgstr "" -"JAX 예제 빌드를 시작하기 전에 :code:`jax`, :code:`jaxlib`, :code:`scikit-" -"learn`, :code:`flwr` 패키지를 설치해야 합니다:" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "JAX를 사용한 선형 회귀" +#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:213 +#: ../../source/docker/tutorial-quickstart-docker.rst:300 +#, fuzzy +msgid "``docker run``: This tells Docker to run a container from an image." 
+msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 -msgid "" -"We begin with a brief description of the centralized training code based on " -"a :code:`Linear Regression` model. If you want a more in-depth explanation " -"of what's going on then have a look at the official `JAX documentation " -"`_." +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:214 +#: ../../source/docker/tutorial-quickstart-docker.rst:301 +msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" -"먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙 집중식 훈련 코드에 대한 " -"간략한 설명부터 시작하겠습니다. 더 자세한 설명을 원하시면 공식 `JAX 문서 " -"`_를 참조하세요." -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 -msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to be " -"imported. In addition, we need to import :code:`sklearn` since we use :code:" -"`make_regression` for the dataset and :code:`train_test_split` to split the " -"dataset into a training and test set. You can see that we do not yet import " -"the :code:`flwr` package for federated learning. This will be done later." -msgstr "" -"전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 구성 요소가 포함된 " -":code:`jax_training.py`라는 새 파일을 생성해 보겠습니다. 먼저, JAX 패키지인 " -":code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 또한 데이터 세트에 " -":code:`make_regression`을 사용하고 데이터 세트를 학습 및 테스트 세트로 " -"분할하기 위해 :code:`train_test_split`을 사용하므로 :code:`sklearn`을 " -"가져와야 합니다. 연합 학습을 위해 아직 :code:`flwr` 패키지를 가져오지 않은 " -"것을 볼 수 있습니다. 이 작업은 나중에 수행됩니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 +#: ../../source/docker/enable-tls.rst msgid "" -"The :code:`load_data()` function loads the mentioned training and test sets." +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -"code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is " -"defined in :code:`load_model()`." +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -"모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정" -"의되어 있습니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 -msgid "" -"We now need to define the training (function :code:`train()`), which loops " -"over the training set and measures the loss (function :code:`loss_fn()`) for " -"each batch of training examples. The loss function is separate since JAX " -"takes derivatives with a :code:`grad()` function (defined in the :code:" -"`main()` function and called in :code:`train()`)." +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." 
msgstr "" -"이제 훈련 집합을 반복하고 각 훈련 예제 배치에 대해 손실을 측정하는(함수 :" -"code:`loss_fn()`) 훈련(함수 :code:`train()`)을 정의해야 합니다. JAX는 :code:" -"`grad()` 함수(:code:`main()` 함수에 정의되고 :code:`train()`에서 호출됨)로 파" -"생물을 취하므로 손실 함수는 분리되어 있습니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function :code:`evaluation()`. " -"The function takes all test examples and measures the loss of the linear " -"regression model." +"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -"모델의 평가는 :code:`evaluation()` 함수에 정의되어 있습니다. 이 함수는 모든 " -"테스트 예제를 가져와 선형 회귀 모델의 손실을 측정합니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. As " -"already mentioned, the :code:`jax.grad()` function is defined in :code:" -"`main()` and passed to :code:`train()`." +#: ../../source/docker/enable-tls.rst +msgid "directory." msgstr "" -"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으므로 이제 모든 것을 종합하" -"여 JAX를 사용 모델을 훈련할 수 있습니다. 이미 언급했듯이 :code:`jax.grad()` " -"함수는 :code:`main()`에 정의되어 :code:`train()`에 전달됩니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. Let's " -"take the next step and use what we've built to create a simple federated " -"learning system consisting of one server and two clients." +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"지금까지는 JAX를 사용해 본 적이 있다면 이 모든 것이 상당히 익숙해 보일 것입니" -"다. 다음 단계로 넘어가서 우리가 구축한 것을 사용하여 하나의 서버와 두 개의 클" -"라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX와 Flower의 만남" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 -msgid "" -"The concept of federating an existing workload is always the same and easy " -"to understand. We have to start a *server* and then use the code in :code:" -"`jax_training.py` for the *clients* that are connected to the *server*. The " -"*server* sends model parameters to the clients. The *clients* run the " -"training and update the parameters. The updated parameters are sent back to " -"the *server*, which averages all received parameter updates. This describes " -"one round of the federated learning process, and we repeat this for multiple " -"rounds." -msgstr "" -"기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 쉽습니다. 서버*를 " -"시작한 다음 *서버*에 연결된 *클라이언트*에 대해 :code:`jax_training.py`의 " -"코드를 사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. " -"클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된 " -"파라미터는 *서버*로 다시 전송되며, 수신된 모든 파라미터 업데이트의 평균을 " -"구합니다. 이는 연합 학습 프로세스의 한 라운드를 설명하며, 이 과정을 여러 " -"라운드에 걸쳐 반복합니다." 
- -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up :code:" -"`server.py` first. The *server* needs to import the Flower package :code:" -"`flwr`. Next, we use the :code:`start_server` function to start a server and " -"tell it to perform three rounds of federated learning." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. 먼저 " -":code:`server.py`를 설정해 보겠습니다. *server*는 Flower 패키지 :code:`flwr`" -"를 가져와야 합니다. 다음으로, :code:`start_server` 함수를 사용하여 서버를 " -"시작하고 세 차례의 연합 학습을 수행하도록 지시합니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build " -"upon the previously defined JAX training in :code:`jax_training.py`. Our " -"*client* needs to import :code:`flwr`, but also :code:`jax` and :code:" -"`jaxlib` to update the parameters on our JAX model:" +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:" -"`jax_training.py`에서 이전에 정의한 JAX 교육을 기반으로 빌드합니다. *클라이언" -"트*는 :code:`flwr`을 가져와야 하며, JAX 모델의 파라미터를 업데이트하기 위해 :" -"code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of " -"either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. Our " -"implementation will be based on :code:`flwr.client.NumPyClient` and we'll " -"call it :code:`FlowerClient`. :code:`NumPyClient` is slightly easier to " -"implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like JAX) because it avoids some of the boilerplate that " -"would otherwise be necessary. :code:`FlowerClient` needs to implement four " -"methods, two methods for getting/setting model parameters, one method for " -"training the model, and one method for testing the model:" -msgstr "" -"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` " -"또는 :code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니" -"다. 구현은 :code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:" -"`FlowerClient`라고 부를 것입니다. :code:`NumPyClient`는 필요한 일부 보일러플" -"레이를 피할 수 있기 때문에 NumPy 상호 운용성이 좋은 프레임워크(예: JAX)를 사" -"용하는 경우 :code:`Client`보다 구현하기가 약간 더 쉽습니다. code:" -"`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 " -"위한 메서드 1개, 모델 테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 " -"합니다:" - -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (선택사항)`" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." 
+msgstr "VSCode Dev Container에서 개발" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the server" -msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." +msgstr "flower 초연결" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy :code:`ndarray`'s " -"(think list of neural network layers)" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -"(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파" -"라미터 목록에 대해 반복합니다" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy :code:" -"`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -"모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 :" -"code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst msgid "" -"update the parameters of the local model with the parameters received from " -"the server" -msgstr "서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다" - -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" -msgstr "로컬 훈련 세트에서 모델을 훈련합니다" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" -msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`evaluate`" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" +#: ../../source/docker/enable-tls.rst +msgid "the network." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "로컬 손실을 서버로 반환합니다" +#: ../../source/docker/enable-tls.rst:71 +#, fuzzy +msgid "SuperNode" +msgstr "run\\_supernode" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/enable-tls.rst:73 +#, fuzzy msgid "" -"The challenging part is to transform the JAX model parameters from :code:" -"`DeviceArray` to :code:`NumPy ndarray` to make them compatible with " -"`NumPyClient`." +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -"어려운 부분은 JAX 모델 매개변수를 :code:`DeviceArray`에서 :code:`NumPy " -"ndarray`로 변환하여 `NumPyClient`와 호환되도록 하는 것입니다." +"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작할 " +"때 ``--root-certificates`` 플래그를 사용하세요." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/enable-tls.rst:78 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make " -"use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type annotations " -"to give you a better understanding of the data types that get passed around." -msgstr "" -"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전" -"에 :code:`jax_training.py`에 정의된 함수 :code:`train()`과 :code:`evaluate()`" -"를 사용합니다. 따라서 여기서 우리가 실제로 하는 일은 이미 정의된 함수 중 훈련" -"과 평가를 위해 호출할 함수를 :code:`NumPyClient` 서브클래스를 통해 Flower에" -"게 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 유형 " -"type annotation을 포함했습니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" -msgstr "여기까지입니다. 이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 " -"있습니다" +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do so) " -"and see your JAX project run federated learning across two clients. " -"Congratulations!" +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -"를 입력하고(그 전에 서버가 계속 실행 중인지 확인하세요) 두 클라이언트에서 " -"연합 학습을 실행하는 JAX 프로젝트를 확인합니다. 축하합니다!" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "VSCode Dev Container에서 개발" + +#: ../../source/docker/enable-tls.rst msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"이 예제의 소스 코드는 시간이 지남에 따라 개선되었으며 여기에서 확인할 수 있습" -"니다: 'Quickstart JAX `_. 두 클라이언트가 동일한 데이터 세트를 로드하기 때문에 이 예" -"제는 다소 단순화되어 있습니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/enable-tls.rst msgid "" -"You're now prepared to explore this topic further. How about using a more " -"sophisticated model or using a different dataset? How about adding more " -"clients?" 
+"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"이제 이 주제를 더 자세히 살펴볼 준비가 되었습니다. 더 정교한 모델을 사용하거" -"나 다른 데이터 집합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것" -"은 어떨까요?" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "예제: 파이토치 - 중앙 집중식에서 연합식으로" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated version " -"of an existing machine learning workload. We are using PyTorch to train a " -"Convolutional Neural Network on the CIFAR-10 dataset. First, we introduce " -"this machine learning task with a centralized training approach based on the " -"`Deep Learning with PyTorch `_ tutorial. Then, we build upon the centralized " -"training code to run the training in a federated fashion." -msgstr "" -"이 튜토리얼에서는 Flower를 사용해 기존 머신 러닝 워크로드의 연합 버전을 " -"구축하는 방법을 보여드립니다. 여기서는 PyTorch를 사용해 CIFAR-10 데이터 " -"세트에서 컨볼루션 신경망을 훈련합니다. 먼저, 'PyTorch로 딥 러닝 " -"`_ " -"튜토리얼을 기반으로 centralized 학습 접근 방식을 사용하여 이 머신 러닝 " -"작업을 소개합니다. 그런 다음 centralized 훈련 코드를 기반으로 연합 방식 " -"훈련을 실행합니다." +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/docker/enable-tls.rst:107 msgid "" -"We begin with a brief description of the centralized CNN training code. If " -"you want a more in-depth explanation of what's going on then have a look at " -"the official `PyTorch tutorial `_." +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -"중앙 집중식 CNN 트레이닝 코드에 대한 간략한 설명부터 시작하겠습니다. 무슨 " -"일이 일어나고 있는지 더 자세히 설명하려면 공식 `PyTorch 튜토리얼 " -"`_을 " -"참조하세요." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all " -"required packages (such as :code:`torch` and :code:`torchvision`) need to be " -"imported. You can see that we do not import any package for federated " -"learning. You can keep all these imports as they are even when we add the " -"federated learning components at a later point." -msgstr "" -"CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한 모든 구성 요소가 포함된 " -":code:`cifar.py`라는 새 파일을 생성해 보겠습니다. 먼저, 필요한 모든 " -"패키지(예: :code:`torch` 및 :code:`torchvision`)를 가져와야 합니다. 연합 " -"학습을 위한 패키지를 가져오지 않는 것을 확인 할 수 있습니. 나중에 연합 학습 " -"구성 요소를 추가할 때에도 이러한 모든 가져오기를 그대로 유지할 수 있습니다." +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 -msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." msgstr "" -"이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10 데이터 세트를 사용합니다. " -"모델 아키텍처(매우 간단한 컨볼루션 신경망)는 :code:`class Net()`에 정의되어 " -"있습니다." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test sets. " -"The :code:`transform` normalized the data after loading." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -":code:`load_data()` 함수는 CIFAR-10 훈련 및 테스트 세트를 로드합니다. :code:" -"`transform`은 로드 후 데이터를 정규화합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`) which loops " -"over the training set, measures the loss, backpropagates it, and then takes " -"one optimizer step for each batch of training examples." +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -"이제 학습 집합을 반복하고, 손실을 측정하고, 이를 역전파한 다음 각 학습 예제 " -"배치에 대해 하나의 최적화 단계를 수행하는 학습(함수 :code:`train()`)을 정의해" -"야 합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function :code:`test()`. The " -"function loops over all test samples and measures the loss of the model " -"based on the test dataset." +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" msgstr "" -"모델 평가는 :code:`test()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 샘" -"플을 반복하고 테스트 데이터 세트에 따라 모델의 손실을 측정합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 " -"CIFAR-10에서 CNN을 훈련할 수 있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/enable-tls.rst msgid "" -"So far, this should all look fairly familiar if you've used PyTorch before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다" -"음 단계로 넘어가서 구축한 것을 사용하여 하나의 서버와 두 개의 클라이언트로 구" -"성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 -msgid "" -"The simple machine learning project discussed in the previous section trains " -"the model on a single dataset (CIFAR-10), we call this centralized learning. " -"This concept of centralized learning, as shown in the previous section, is " -"probably known to most of you, and many of you have used it previously. " -"Normally, if you'd want to run machine learning workloads in a federated " -"fashion, then you'd have to change most of your code and set everything up " -"from scratch. This can be a considerable effort." -msgstr "" -"이전 섹션에서 설명한 간단한 머신 러닝 프로젝트는 단일 데이터 세트(CIFAR-10)" -"로 모델을 학습시키는데, 이를 중앙 집중식 학습이라고 부릅니다. 이전 섹션에서 " -"설명한 중앙 집중식 학습의 개념은 대부분 알고 계실 것이며, 많은 분들이 이전에 " -"사용해 보셨을 것입니다. 일반적으로 머신 러닝 워크로드를 연합 방식으로 " -"실행하려면 대부분의 코드를 변경하고 모든 것을 처음부터 다시 설정해야 합니다. " -"이는 상당한 노력이 필요할 수 있습니다." +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." 
+msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" +msgstr "Docker를 사용하여 Flower 실행" + +#: ../../source/docker/index.rst:4 msgid "" -"However, with Flower you can evolve your pre-existing code into a federated " -"learning setup without the need for a major rewrite." +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -"하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으" -"로 발전시킬 수 있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/index.rst:7 msgid "" -"The concept is easy to understand. We have to start a *server* and then use " -"the code in :code:`cifar.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The *clients* " -"run the training and update the parameters. The updated parameters are sent " -"back to the *server* which averages all received parameter updates. This " -"describes one round of the federated learning process and we repeat this for " -"multiple rounds." -msgstr "" -"개념은 이해하기 쉽습니다. *서버*를 시작한 다음 *서버*에 연결된 *클라이언트*" -"에 대해 :code:`cifar.py`의 코드를 사용해야 합니다. *서버*는 모델 파라미터를 " -"클라이언트로 전송합니다. *클라이언트*는 학습을 실행하고 파라미터를 " -"업데이트합니다. 업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 " -"수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은 연합 학습 프로세스의 " -"한 라운드를 설명하며 여러 라운드에 걸쳐 이 과정을 반복합니다." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build " -"upon the previously defined centralized training in :code:`cifar.py`. Our " -"*client* needs to import :code:`flwr`, but also :code:`torch` to update the " -"parameters on our PyTorch model:" +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "시작하기" + +#: ../../source/docker/index.rst:20 +msgid "Running in Production" msgstr "" -"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`cifar.py`" -"에서 이전에 정의한 중앙 집중식 학습을 기반으로 구축합니다. *클라이언트*는 " -":code:`flwr`을 가져와야 하며, PyTorch 모델의 파라미터를 업데이트하기 위해 " -":code:`torch`도 가져와야 합니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of " -"either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. Our " -"implementation will be based on :code:`flwr.client.NumPyClient` and we'll " -"call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier to " -"implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids some " -"of the boilerplate that would otherwise be necessary. :code:`CifarClient` " -"needs to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing " -"the model:" -msgstr "" -"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` " -"또는 :code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니" -"다. 우리의 구현은 :code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :" -"code:`CifarClient`라고 부를 것입니다. :code:`NumPyClient`는 파이토치나 텐서플" -"로우/Keras처럼 NumPy 상호운용성이 좋은 프레임워크를 사용하는 경우 필요한 일" -"부 보일러플레이트를 피하기 때문에 :code:`Client`보다 구현하기가 조금 더 쉽습" -"니다. 
code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모" -"델 학습을 위한 메서드 1개, 모델 테스트를 위한 메서드 1개 등 네 가지 메서드를 " -"구현해야 합니다:" +#: ../../source/docker/index.rst:29 +#, fuzzy +msgid "Advanced Options" +msgstr "고급 Docker 옵션" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" +#: ../../source/docker/index.rst:41 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "Docker를 사용하여 Flower 실행" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "로컬 손실 및 정확도를 서버에 반환합니다" +#: ../../source/docker/persist-superlink-state.rst:4 +#, fuzzy +msgid "" +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." +msgstr "" +"기본적으로 Flower SuperLink는 상태를 in-memory에 유지합니다. Docker 플래그 `--rm``을 사용하는 경우" +" 컨테이너 시작 사이에 상태가 유지되지 않습니다. 아래에서 호스트 시스템의 파일에 상태를 저장하는 방법을 보여드리겠습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make " -"use of the functions :code:`train()` and :code:`test()` previously defined " -"in :code:`cifar.py`. So what we really do here is we tell Flower through " -"our :code:`NumPyClient` subclass which of our already defined functions to " -"call for training and evaluation. We included type annotations to give you a " -"better understanding of the data types that get passed around." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." msgstr "" -"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전" -"에 :code:`cifar.py`에 정의된 함수인 :code:`train()`과 :code:`test()`를 활용합" -"니다. 따라서 여기서 실제로 하는 일은 :code:`NumPyClient` 서브클래스를 통해 이" -"미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 Flower에 알려주는 것입니" -"다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 type annotations을 포함했습" -"니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/docker/persist-superlink-state.rst:10 msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load your " -"data and model by using :code:`cifar.py`. Start :code:`CifarClient` with the " -"function :code:`fl.client.start_client()` by pointing it at the same IP " -"address we used in :code:`server.py`:" +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -"이제 모델과 데이터를 모두 로드하는 함수를 정의하고, :code:`CifarClient`를 생" -"성하고, 이 클라이언트를 시작하는 작업만 남았습니다. 코드:`cifar.py`를 사용하" -"여 데이터와 모델을 로드합니다. 
:code:`server.py`에서 사용한 것과 동일한 IP 주" -"소를 지정하여 :code:`fl.client.start_client()` 함수로 :code:`CifarClient`를 " -"시작합니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/persist-superlink-state.rst:20 +#, fuzzy msgid "" -"in each window (make sure that the server is running before you do so) and " -"see your (previously centralized) PyTorch project run federated learning " -"across two clients. Congratulations!" +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." msgstr "" -"를 입력하고(그 전에 서버가 실행 중인지 확인하세요) (이전에는 중앙 집중식) " -"PyTorch 프로젝트가 두 클라이언트에서 연합 학습을 실행하는 것을 확인합니다. " -"축하합니다!" +"아래 예에서는 새 디렉터리를 생성하고, 사용자 ID를 변경하고, 플래그 ``--volume``을 통해 Docker에게 로컬 " +"``state`` 디렉터리를 컨테이너의 ``/app/state`` 디렉터리에 마운트하도록 지시합니다. 또한 " +"``--database`` 플래그를 사용하여 데이터베이스 파일의 이름을 지정합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/persist-superlink-state.rst:35 +#, fuzzy msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more " -"clients?" -msgstr "" -"이 예제의 전체 소스 코드: `파이토치: 중앙 Centralized에서 Federated으로 (코" -"드) `_. 물론 이 예제는 두 클라이언트가 완전히 동일한 데" -"이터 세트를 로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 현실적이지 " -"않습니다. 이제 이 주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에" -"서 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요? 클라이언트를 " -"더 추가하는 것은 어떨까요?" +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." +msgstr "" +"SuperLink가 시작되자마자 호스트 시스템의 ``state`` 디렉터리에 ``state.db`` 파일이 생성됩니다. 파일이 이미" +" 존재하는 경우 SuperLink는 파일에서 상태를 복원하려고 시도합니다. 빈 데이터베이스로 SuperLink를 시작하려면 " +"``state.db`` 파일을 제거하면 됩니다." + +#: ../../source/docker/pin-version.rst:2 +#, fuzzy +msgid "Pin a Docker Image to a Specific Version" +msgstr "특정 버전에 Docker 이미지 고정하기" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "차분 프라이버시" +#: ../../source/docker/pin-version.rst:4 +#, fuzzy +msgid "" +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." +msgstr "" +"태그 뒤에 있는 이미지가 업데이트될 수 있습니다. 이러한 업데이트에는 일반적으로 Flower의 기능을 변경해서는 안 되는 시스템 " +"의존성에 대한 보안 업데이트가 포함됩니다. 그러나 항상 동일한 이미지를 사용하려면 태그 대신 이미지의 해시를 지정할 수 있습니다." 
-#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/pin-version.rst:13 +#, fuzzy msgid "" -"The information in datasets like healthcare, financial transactions, user " -"preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such data " -"is also sensitive and there is a risk of compromising individual privacy." +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" +msgstr "다음 명령은 ``superlink:1.8.0`` 태그가 참조하는 현재 이미지 해시를 반환합니다:" + +#: ../../source/docker/pin-version.rst:22 +msgid "This will output" msgstr "" -"의료, 금융 거래, 사용자 선호도 등과 같은 데이터 세트의 정보는 가치 있고 과학" -"적 혁신의 잠재력을 지니고 있으며 중요한 비즈니스 인사이트를 제공합니다. 그러" -"나 이러한 데이터는 또한 민감한 정보이며 개인의 프라이버시를 침해할 위험이 있" -"습니다." -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/pin-version.rst:29 +#, fuzzy +msgid "Next, we can pin the digest when running a new SuperLink container:" +msgstr "다음으로, 새 SuperLink 컨테이너를 실행할 때 해시를 고정할 수 있습니다:" + +#: ../../source/docker/run-as-root-user.rst:2 +#, fuzzy +msgid "Run with Root User Privileges" +msgstr "루트 사용자 권한으로 실행" + +#: ../../source/docker/run-as-root-user.rst:4 +#, fuzzy msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where differential " -"privacy comes in. It provides the possibility of analyzing data while " -"ensuring the privacy of individuals." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -"익명화와 같은 기존 방법만으로는 재식별 및 데이터 연결과 같은 공격으로 인해 효" -"과가 없습니다. 그래서 차분 프라이버시가 등장했습니다. 차등 개인정보 보호는 개" -"인의 프라이버시를 보장하면서 데이터를 분석할 수 있는 가능성을 제공합니다." +"기본적으로 Flower Docker 이미지는 루트 사용자가 아닌 사용자(사용자명/그룹명:``app``, UID/GID: " +"``49999``)로 실행됩니다. 빌드 프로세스 중 특정 작업에 필요한 경우가 아니라면 루트 사용자를 사용하지 않는 것이 좋습니다." +" 보안 모범 사례를 유지하려면 항상 프로덕션 환경에서 루트 사용자가 아닌 사용자로 컨테이너를 실행해야 합니다." -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the " -"individual's information remains hidden in the crowd." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -"하나의 레코드(예: 앨리스의 데이터)를 제외하고는 동일한 두 개의 데이터 세트가 " -"있다고 상상해 보세요. 차분 프라이버(DP)는 평균 소득 계산과 같은 모든 분석(M)" -"이 두 데이터 세트에 대해 거의 동일한 결과를 산출하도록 보장합니다(O와 O' 는 " -"비슷할 것입니다). 이렇게 하면 그룹 패턴은 보존하면서 개별 세부 정보는 가려져 " -"개인의 정보가 군중 속에 숨겨집니다." 
-#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" -msgstr "DP 소개" +#: ../../source/docker/run-as-root-user.rst:12 +#, fuzzy +msgid "Run a Container with Root User Privileges" +msgstr "**루트 사용자 권한으로 컨테이너 실행하기**" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the analysis." -msgstr "" -"DP를 달성하기 위해 가장 일반적으로 사용되는 메커니즘 중 하나는 분석의 전반적" -"인 정확도를 유지하면서 데이터에서 각 개인의 기여도를 가릴 수 있도록 분석 결과" -"에 충분한 노이즈를 추가하는 것입니다." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" +msgstr "``-u`` 플래그를 사용하여 Docker 이미지를 실행하고 사용자 이름으로 ``root``를 지정합니다:" -#: ../../source/explanation-differential-privacy.rst:25 -msgid "Formal Definition" -msgstr "공식 정의" +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." +msgstr "이 명령은 루트 사용자 권한으로 Docker 컨테이너를 실행합니다." -#: ../../source/explanation-differential-privacy.rst:26 +#: ../../source/docker/run-as-root-user.rst:24 +#, fuzzy +msgid "Run the Build Process with Root User Privileges" +msgstr "**루트 사용자 권한으로 빌드 프로세스를 실행합니다**" + +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a " -"single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, :math:`\\delta`)-" -"differential privacy if for any two neighboring databases, D :sub:`1` and D :" -"sub:`2`, that differ in only a single record, and for all possible outputs S " -"⊆ Range(A):" -msgstr "" -"차분 프라이버시(DP)는 공격자가 무작위 알고리즘의 출력을 통해 유추할 수 있는 " -"정보에 대해 통계적 보장을 제공합니다. 이는 노이즈를 추가하여 알고리즘의 출력" -"에 대한 한 개인의 영향력에 대한 무조건적인 상한선을 제공합니다[1]. 무작위 메" -"커니즘 M은 하나의 레코드만 다른 두 개의 인접 데이터베이스인 D:sub:`1`과 D:" -"sub:`2`의 경우, 가능한 모든 출력 S ⊆ Range(A)에 대해 (:math:`\\epsilon`, :" -"math:`\\delta`)-차분 프라이버시를 제공합니다:" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." +msgstr "" +"Docker 이미지 빌드 과정에서 루트 사용자로 전환하여 누락된 시스템 의존성을 설치하려면 Dockerfile 내에서 ``USER " +"root`` 지시어를 사용할 수 있습니다." -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-as-root-user.rst:29 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "SuperNode Dockerfile 만들기" + +#: ../../source/docker/run-as-subprocess.rst:2 +#, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "린터 및 테스트 실행" + +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." 
msgstr "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts for " -"a small probability on which the upper bound :math:`\\epsilon` does not " -"hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum " -"change in the output due to the inclusion or removal of a single record." -msgstr "" -"프라이버시 예산이라고도 하는 :math:`\\epsilon` 매개변수는 프라이버시 손실을 " -"측정하는 지표입니다. 이 매개변수는 프라이버시와 효용의 균형을 제어하며, :" -"math:`\\epsilon` 값이 낮을수록 프라이버시 수준이 높지만 효용도 감소할 가능성" -"이 높습니다. math:`\\delta` 매개변수는 상한값인 :math:`\\epsilon`이 적용되지 " -"않는 작은 확률을 설명합니다. 차분 프라이버시를 달성하는 데 필요한 노이즈의 양" -"은 출력의 감도에 비례하며, 이는 단일 레코드의 포함 또는 제거로 인한 출력의 최" -"대 변화를 측정합니다." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 -msgid "Differential Privacy in Machine Learning" -msgstr "머신 러닝의 차분 프라이버시" +#: ../../source/docker/run-as-subprocess.rst:16 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Flower SuperNode" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/docker/run-as-subprocess.rst:30 +#, fuzzy msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific information " -"about any individual data points and subsequently prevent the model from " -"revealing sensitive information. Depending on the stage at which noise is " -"introduced, various methods exist for applying DP to machine learning " -"algorithms. One approach involves adding noise to the training data (either " -"to the features or labels), while another method entails injecting noise " -"into the gradients of the loss function during model training. Additionally, " -"such noise can be incorporated into the model's output." -msgstr "" -"머신 러닝에서 DP를 활용하여 학습 데이터의 개인정보를 보호할 수 있습니다. 차" -"분 비공개 머신 러닝 알고리즘은 알고리즘이 개별 데이터 포인트에 대한 특정 정보" -"를 학습하지 못하도록 하여 모델이 민감한 정보를 노출하지 않도록 하는 방식으로 " -"설계되었습니다. 노이즈가 도입되는 단계에 따라 머신 러닝 알고리즘에 DP를 적용" -"하는 다양한 방법이 존재합니다. 한 가지 방법은 학습 데이터(특징 또는 레이블)" -"에 노이즈를 추가하는 것이고, 다른 방법은 모델 학습 중에 손실 함수의 기울기에 " -"노이즈를 주입하는 것입니다. 또한 이러한 노이즈를 모델의 출력에 통합할 수도 있" -"습니다." +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" +msgstr "" +"다음으로, Dockerfile 및 ClientApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 SuperNode Docker " +"이미지를 빌드합니다." 
-#: ../../source/explanation-differential-privacy.rst:53 -msgid "Differential Privacy in Federated Learning" -msgstr "연합 학습의 차분 프라이버시" +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information " -"about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -"연합 학습은 여러 당사자가 원시 데이터를 공유하지 않고도 공동으로 모델을 " -"학습할 수 있는 데이터 최소화 접근 방식입니다. 그러나 연합 학습은 새로운 " -"개인정보 보호 문제를 야기하기도 합니다. 당사자와 중앙 서버 간의 모델 " -"업데이트는 로컬 데이터에 대한 정보를 유출할 수 있습니다. 이러한 유출은 " -"멤버십 추론 및 속성 추론 공격이나 모델 반전 공격과 같은 공격에 악용될 수 " -"있습니다." -#: ../../source/explanation-differential-privacy.rst:58 -msgid "" -"DP can play a crucial role in federated learning to provide privacy for the " -"clients' data." -msgstr "DP는 연합 학습에서 클라이언트의 데이터에 대한 개인 정보 보호를 제공하는 데 " -"중요한 역할을 할 수 있습니다." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "빠른 시작 튜토리얼" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"Depending on the granularity of privacy provision or the location of noise " -"addition, different forms of DP exist in federated learning. In this " -"explainer, we focus on two approaches of DP utilization in federated " -"learning based on where the noise is added: at the server (also known as the " -"center) or at the client (also known as the local)." +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -"개인 정보 제공의 세분성 또는 노이즈 추가 위치에 따라 연합 학습에는 다양한 " -"형태의 DP가 존재합니다. 이 설명에서는 노이즈가 추가되는 위치에 따라 서버(" -"중앙이라고도 함) 또는 클라이언트(로컬이라고도 함)에서의 연합 학습에서 DP를 " -"활용하는 두 가지 접근 방식에 중점을 둡니다." -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 msgid "" -"**Central Differential Privacy**: DP is applied by the server and the goal " -"is to prevent the aggregated model from leaking information about each " -"client's data." +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." msgstr "" -"**중앙 차분 프라이버시**: DP는 서버에서 적용되며 집계된 모델이 각 클라이언트" -"의 데이터에 대한 정보를 유출하는 것을 방지하는 것이 목표입니다." -#: ../../source/explanation-differential-privacy.rst:65 -msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the updates " -"that are sent to the server from leaking any information about the client's " -"data." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +#, fuzzy +msgid "Before you start, make sure that:" +msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -"**로컬 차분 개인정보 보호**: DP는 정보를 서버로 보내기 전에 클라이언트 측에" -"서 적용되며, 서버로 전송되는 업데이트가 클라이언트 데이터에 대한 정보를 유출" -"하는 것을 방지하는 것이 목표입니다." -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 -msgid "Central Differential Privacy" -msgstr "중앙 차분 프라이버시" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Docker 데몬이 실행 중인지 확인하십시오." -#: ../../source/explanation-differential-privacy.rst:69 -msgid "" -"In this approach, which is also known as user-level DP, the central server " -"is responsible for adding noise to the globally aggregated parameters. It " -"should be noted that trust in the server is required." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." msgstr "" -"사용자 수준 DP라고도 하는 이 접근 방식에서는 중앙 서버가 전역적으로 집계된 매" -"개변수에 노이즈를 추가하는 역할을 담당합니다. 서버에 대한 신뢰가 필요하다는 " -"점에 유의해야 합니다." -#: ../../source/explanation-differential-privacy.rst:76 -msgid "" -"While there are various ways to implement central DP in federated learning, " -"we concentrate on the algorithms proposed by [2] and [3]. The overall " -"approach is to clip the model updates sent by the clients and add some " -"amount of noise to the aggregated model. In each iteration, a random set of " -"clients is chosen with a specific probability for training. Each client " -"performs local training on its own data. The update of each client is then " -"clipped by some value `S` (sensitivity `S`). This would limit the impact of " -"any individual client which is crucial for privacy and often beneficial for " -"robustness. A common approach to achieve this is by restricting the `L2` " -"norm of the clients' model updates, ensuring that larger updates are scaled " -"down to fit within the norm `S`." -msgstr "" -"연합 학습에서 중앙 DP를 구현하는 방법은 여러 가지가 있지만, 여기서는 [2]와 " -"[3]에서 제안한 알고리즘에 집중합니다. 전반적인 접근 방식은 클라이언트가 " -"전송한 모델 업데이트를 잘라내고 집계된 모델에 약간의 노이즈를 추가하는 " -"것입니다. 각 반복에서 특정 확률로 훈련할 무작위 클라이언트 세트가 " -"선택됩니다. 각 클라이언트는 자체 데이터에 대해 로컬 학습을 수행합니다. 그런 " -"다음 각 클라이언트의 업데이트는 특정 값 `S`(민감도 `S`)에 의해 잘립니다. " -"이렇게 하면 개별 클라이언트의 영향을 제한할 수 있어 개인정보 보호에 중요하고 " -"견고성에 도움이 되는 경우가 많습니다. 이를 달성하기 위한 일반적인 접근 " -"방식은 클라이언트 모델 업데이트의 `L2` 규범을 제한하여 더 큰 업데이트가 규범 " -"`S`에 맞도록 축소되도록 하는 것입니다." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "예시 요청" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" -msgstr "클리핑" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +msgid "" +"Clone the quickstart example you like to run. 
For example, ``quickstart-" +"pytorch``:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to distort " -"the sum of all clients' updates. The amount of noise is scaled to the " -"sensitivity value to obtain a privacy guarantee. The Gaussian mechanism is " -"used with a noise sampled from `N (0, σ²)` where `σ = ( noise_scale * S ) / " -"(number of sampled clients)`." +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -"그 후 가우시안 메커니즘을 사용하여 모든 클라이언트의 업데이트 합계를 왜곡하" -"기 위해 노이즈를 추가합니다. 노이즈의 양은 감도 값에 따라 조정되어 프라이버" -"시 보장을 얻습니다. 가우시안 메커니즘은 `N (0, σ²)`에서 샘플링된 노이즈와 함" -"께 사용됩니다. 여기서 `σ = (noise_scale * S) / (샘플링된 클라이언트 수)`입니" -"다." -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" -msgstr "클리핑" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#, fuzzy msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed Clipping " -"and Adaptive Clipping." -msgstr "중앙 DP에서 일반적으로 사용되는 클리핑에는 고정 클리핑과 조정 클리핑의 두 " -"가지 형태가 있습니다." +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 +#, fuzzy +msgid "pyproject.toml" +msgstr "또는 ``pyproject.toml``:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude of " -"clients' updates. Any update exceeding this threshold is clipped back to the " -"threshold value." +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." msgstr "" -"**고정 클리핑** : 클라이언트의 업데이트 크기에 대해 미리 정의된 고정 임계값" -"이 설정됩니다. 이 임계값을 초과하는 모든 업데이트는 임계값으로 다시 클리핑됩" -"니다." -#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based on " -"the observed update distribution [4]. It means that the clipping value is " -"tuned during the rounds with respect to the quantile of the update norm " -"distribution." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -"**조정 클리핑** : 클리핑 임계값은 관찰된 업데이트 분포에 따라 동적으로 " -"조정됩니다[4]. 즉, 클리핑 값은 업데이트 표준 분포의 사분위수에 따라 라운드가 " -"진행되는 동안 조정됩니다." 
-#: ../../source/explanation-differential-privacy.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#, fuzzy +msgid "Run the example:" +msgstr "전체 코드 예제" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 msgid "" -"The choice between fixed and adaptive clipping depends on various factors " -"such as privacy requirements, data distribution, model complexity, and " -"others." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." msgstr "" -"고정 클리핑과 조정 클리핑 중 선택은 개인정보 보호 요구 사항, 데이터 배포, 모" -"델 복잡성 등 다양한 요인에 따라 달라집니다." -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -msgid "Local Differential Privacy" -msgstr "로컬 차분 프라이버시" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted that " -"local DP leads to a decrease in accuracy but better privacy in comparison to " -"central DP." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -"이 접근 방식에서는 각 클라이언트가 DP를 수행할 책임이 있습니다. 로컬 DP는 완" -"전히 신뢰할 수 있는 애그리게이터가 필요하지 않지만, 로컬 DP는 중앙 DP에 비해 " -"정확도는 떨어져도 개인 정보 보호는 더 우수하다는 점에 유의해야 합니다." -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" -msgstr "이 설명에서는 로컬 DP를 달성하는 두 가지 형태에 중점을 둡니다:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 -msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. 
To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering the " -"sensitivity of the local model to be ∆, Gaussian noise is applied with a " -"noise scale of σ where:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#, fuzzy +msgid "Limitations" +msgstr "동기" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#, fuzzy +msgid "Quickstart Example" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#, fuzzy +msgid "quickstart-fastai" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" msgstr "" -"각 클라이언트는 로컬 업데이트를 서버로 보내기 전에 로컬 업데이트에 노이즈를 " -"추가합니다. 로컬 모델의 감도를 ∆로 간주하여 가우시안 노이즈가 σ의 노이즈 스케" -"일로 적용되어 (:math:`\\epsilon`, :math:`\\delta`)-DP를 달성하기 위해, 여기" -"서 σ는 노이즈 스케일입니다:" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-jax" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right)}}" -"{\\epsilon}\n" -"\n" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right)}}" -"{\\epsilon}\n" -"\n" -#: ../../source/explanation-differential-privacy.rst:125 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlx" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -"각 클라이언트는 로컬 트레이닝(DP-SGD) 중에 모델의 gradient에 노이즈를 추가합" -"니다. 보다 구체적으로, 이 접근 방식에서는 gradient이 클리핑되고 보정된 노이즈" -"가 gradient에 주입됩니다." 
-#: ../../source/explanation-differential-privacy.rst:128 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-monai" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-pandas" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +msgid "quickstart-pytorch-lightning" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -"이 두 가지 접근 방식은 서로 다른 수준의 개인정보 보호 기능을 제공한다는 점에 " -"유의하세요." -#: ../../source/explanation-differential-privacy.rst:131 -msgid "**References:**" -msgstr "**참고:**" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "빠른 시작 튜토리얼" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." -msgstr "[1] Dwork 외. 차분 프라이버시의 알고리즘적 기초." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +msgid "quickstart-sklearn-tabular" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language Models." -msgstr "[2] McMahan 외. 차분적 개인 반복 언어 모델 학습." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "빠른 시작 튜토리얼" -#: ../../source/explanation-differential-privacy.rst:137 -msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client Level " -"Perspective." -msgstr "[3] Geyer 외. 차분적 개인 연합 학습: 고객 수준의 관점." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "빠른 시작 튜토리얼" -#: ../../source/explanation-differential-privacy.rst:139 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." +msgstr "" + +#: ../../source/docker/set-environment-variables.rst:2 +#, fuzzy +msgid "Set Environment Variables" +msgstr "환경 변수 설정" + +#: ../../source/docker/set-environment-variables.rst:4 +#, fuzzy msgid "" -"[4] Galen et al. Differentially Private Learning with Adaptive Clipping." -msgstr "[4] Galen 외. 조정형 클리핑을 통한 차분적 개인 학습." +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." +msgstr "Docker 컨테이너 내에서 변수를 설정하려면 ``-e =`` 플래그를 사용하면 됩니다." -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "연합 평가" +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +#, fuzzy +msgid "Quickstart with Docker" +msgstr "빠른 시작 튜토리얼" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or client-" -"side) evaluation." +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." msgstr "" -"연합 학습 시스템에서 모델을 평가하는 데는 중앙 집중식(또는 서버 측) 평가와 " -"연합(또는 클라이언트 측) 평가라는 두 가지 주요 접근 방식이 있습니다." 
-#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "중앙 집중식 평가" +#: ../../source/docker/tutorial-quickstart-docker.rst:7 +msgid "" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "기본 제공 전략" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 -msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" msgstr "" -"모든 기본 제공 전략은 초기화 중에 평가 함수를 제공하여 중앙 집중식 평가를 " -"지원합니다. 평가 함수는 현재 글로벌 모델 파라미터를 입력으로 받아 평가 " -"결과를 반환할 수 있는 모든 함수입니다:" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "사용자 정의 전략" +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst:45 msgid "" -"The :code:`Strategy` abstraction provides a method called :code:`evaluate` " -"that can directly be used to evaluate the current global model parameters. " -"The current server implementation calls :code:`evaluate` after parameter " -"aggregation and before federated evaluation (see next paragraph)." +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." msgstr "" -"코드:`전략` 추상화는 현재 전역 모델 파라미터를 평가하는 데 직접 사용할 수 있" -"는 :코드:`평가`라는 메서드를 제공합니다. 현재 서버 구현에서는 매개변수 집계 " -"후와 연합 평가 전에 :code:`evaluate`를 호출합니다(다음 단락 참조)." -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "연합 평가" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#, fuzzy +msgid "Step 2: Start the SuperLink" +msgstr "서버(SuperLink)" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "연합 평가 구현" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +msgid "Open your terminal and run:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and can " -"be configured from the server side." +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -"클라이언트 측 평가는 :code:`Client.evaluate` 메서드에서 이루어지며 서버 측에" -"서 구성할 수 있습니다." 
-#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "연합 평가 구성" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 -msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" -msgstr "연합 평가는 서버 측에서 구성할 수 있습니다. 기본 제공 전략은 다음 인수를 " -"지원합니다:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:215 +#: ../../source/docker/tutorial-quickstart-docker.rst:304 msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of clients " -"that will be selected for evaluation. If :code:`fraction_evaluate` is set " -"to :code:`0.1` and :code:`100` clients are connected to the server, then :" -"code:`10` will be randomly selected for evaluation. If :code:" -"`fraction_evaluate` is set to :code:`0.0`, federated evaluation will be " -"disabled." +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." msgstr "" -":code:`fraction_evaluate`: 평가를 위해 선택될 클라이언트의 비율을 정의하는 " -":code:`float`입니다. 코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 " -":code:`100` 클라이언트가 서버에 연결되어 있는 경우 :code:`10`이 평가를 위해 " -"무작위로 선택됩니다. code:`fraction_evaluate`가 :code:`0.0`으로 설정된 경우 " -"연합 평가가 비활성화됩니다." -#: ../../source/explanation-federated-evaluation.rst:106 -msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of clients " -"to be selected for evaluation. If :code:`fraction_evaluate` is set to :code:" -"`0.1`, :code:`min_evaluate_clients` is set to 20, and :code:`100` clients " -"are connected to the server, then :code:`20` clients will be selected for " -"evaluation." +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -":code:`min_evaluate_clients`: 평가를 위해 선택할 최소 클라이언트 수. :code:" -"`int`. 코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:" -"`fraction_evaluate`가 20으로 설정되어 있으며 :code:`100` 클라이언트가 서버에 " -"연결되어 있는 경우 :code:`20` 클라이언트가 평가를 위해 선택됩니다." -#: ../../source/explanation-federated-evaluation.rst:107 +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:216 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round of " -"federated evaluation can start. If fewer than :code:`min_available_clients` " -"are connected to the server, the server will wait until more clients are " -"connected before it continues to sample clients for evaluation." +"``--detach``: Run the container in the background, freeing up the " +"terminal." msgstr "" -":code:`min_available_clients`: federated 평가 단계를 시작하기 전에 서버에 연" -"결해야 하는 최소 클라이언트 수를 정의하는 :code:`int`입니다. 서버에 연결된 클" -"라이언트가 :code:`min_available_clients`보다 적으면 서버는 더 많은 클라이언트" -"가 연결될 때까지 기다렸다가 평가를 위한 클라이언트 샘플링을 계속합니다." 
-#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will be " -"called during each round and provides a convenient way to customize client-" -"side evaluation from the server side, for example, to configure the number " -"of validation steps performed." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." msgstr "" -"code:`on_evaluate_config_fn`: 선택한 클라이언트로 전송할 구성 사전을 반환하" -"는 함수입니다. 이 함수는 각 단계 중에 호출되며, 서버 측에서 클라이언트 측 평" -"가를 사용자 지정하는 편리한 방법을 제공합니다(예: 수행되는 유효성 검사 단계 " -"수 구성)." - -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "훈련 중 로컬 모델 업데이트 평가" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Model parameters can also be evaluated during training. :code:`Client.fit` " -"can return arbitrary evaluation results as a dictionary:" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" -"모델 파라미터는 훈련 중에도 평가할 수 있습니다. :code:`Client.fit`은 임의의 " -"평가 결과를 dictionary로 반환할 수 있습니다:" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "전체 코드 예제" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 -msgid "" -"For a full code example that uses both centralized and federated evaluation, " -"see the *Advanced TensorFlow Example* (the same approach can be applied to " -"workloads implemented in any other framework): https://github.com/adap/" -"flower/tree/main/examples/advanced-tensorflow" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +msgid "Step 3: Start the SuperNode" msgstr "" -"연합 평가와 중앙 집중식 평가를 모두 사용하는 전체 코드 예제는 *고급 " -"텐서플로우 예제*(다른 프레임워크에서 구현된 워크로드에도 동일한 접근 방식을 " -"적용할 수 있음)를 참조하세요: https://github.com/adap/flower/tree/main/" -"examples/advanced-tensorflow" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "FED 템플릿" - -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "목차" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[목차](#목차)" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[요약](#요약)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[동기](#동기)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[목표](#목표)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[비목표](#비목표)" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[제안](#제안)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[단점](#단점)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[고려되는 대안](#고려되는 대안)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[부록](#부록)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "요약" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "\\[TODO - 문장 1: 문제 요약\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "\\[TODO - 문장 2: 솔루션 요약\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "동기" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" -msgstr "\\[TODO\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "목표" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "비목표" +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +msgid "Start the second container:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "제안" +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "단점" +#: ../../source/docker/tutorial-quickstart-docker.rst:144 +msgid "" +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "고려되는 대안" +#: ../../source/docker/tutorial-quickstart-docker.rst:148 +msgid "" +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" -msgstr "\\[대안 1\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "flower 클라이언트 앱" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" -msgstr "\\[대안 2\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "Understand the Dockerfile" +msgstr "SuperNode Dockerfile 만들기" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[Enhancement Doc 템플릿](#enhancement-doc-템플릿)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[Metadata](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[워크플로우](#워크플로우)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"Any subsequent commands that reference a directory will be relative to " +"this directory." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"from the current working directory into the container's ``/app`` " +"directory." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "Flower Enhancement는 다음과 같은 표준화된 개발 프로세스입니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "더 큰 변경 사항을 제안하기 위한 공통 구조를 제공합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "from the ``pyproject.toml``." 
+msgstr "또는 ``pyproject.toml``:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "변화의 동기가 분명한지 확인합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "버전 관리 시스템에서 프로젝트 정보를 유지합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "사용자에게 영향력 있는 변화에 대한 동기를 문서화합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "운행 중 작업 추적을 위한 깃허브 이슈를 예약합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"ensure community participants can successfully drive changes to completion " -"across one or more releases while stakeholders are adequately represented " -"throughout the process" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -"커뮤니티 참여자가 하나 이상의 릴리즈에서 변경 사항을 성공적으로 완료할 수 있" -"도록 하는 동시에 이해 관계자가 프로세스 전반에 걸쳐 적절히 대표되도록 보장합" -"니다" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "따라서 Enhancement 문서에는 다음과 같은 측면이 결합되어 있습니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "기능 및 effort-tracking 문서" +#: ../../source/docker/tutorial-quickstart-docker.rst:184 +msgid "" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "제품 요구 사항 문서" +#: ../../source/docker/tutorial-quickstart-docker.rst:189 +#, fuzzy +msgid "" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" +msgstr "" +"다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 ServerApp Docker 이미지를" +" 빌드합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "디자인 문서" +#: ../../source/docker/tutorial-quickstart-docker.rst:198 +#, fuzzy +msgid "" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." 
+msgstr "" +"이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습니다. 여기서 선택한 값은 예시일 " +"뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 +msgid "Start the first ClientApp container:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." -msgstr "를 하나의 파일로 통합하여 커뮤니티와 협력해 점진적으로 생성합니다." +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "``flwr_serverapp:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand and " -"communicate upcoming changes to the project." +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" msgstr "" -"Flower에 제안된 변경 사항이나 기능을 멀리 가져오는 경우, 프로젝트의 향후 변" -"경 사항을 이해하고 전달하기 위해 단일 GitHub 이슈 또는 pull request를 넘어서" -"는 abstraction이 필요합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 -msgid "" -"The purpose of this process is to reduce the amount of \"tribal knowledge\" " -"in our community. By moving decisions from Slack threads, video calls, and " -"hallway conversations into a well-tracked artifact, this process aims to " -"enhance communication and discoverability." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." msgstr "" -"이 프로세스의 목적은 커뮤니티 내 '부족한 지식'의 양을 줄이는 것입니다. 이 프" -"로세스는 Slack 스레드, 영상 통화, 복도 대화에서 나온 의사 결정을 잘 추적된 아" -"티팩트로 옮김으로써 커뮤니케이션과 검색 가능성을 향상시키는 것을 목표로 합니" -"다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +#: ../../source/docker/tutorial-quickstart-docker.rst:222 +msgid "Start the second ClientApp container:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:233 +msgid "Step 5: Start the SuperExec" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:235 +#, fuzzy +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." +msgstr "ServerApp 이미지를 빌드하고 실행하는 절차는 SuperNode 이미지와 거의 동일합니다." + +#: ../../source/docker/tutorial-quickstart-docker.rst:237 msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement " -"process. If an enhancement would be described in either written or verbal " -"communication to anyone besides the author or developer, then consider " -"creating an Enhancement Doc." +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." msgstr "" -"대략적으로 사용자를 대상으로 하는 대규모 개선 사항은 개선 프로세스를 따라야 " -"합니다. 개선 사항을 작성자나 개발자 이외의 다른 사람에게 서면 또는 구두로 설" -"명해야 하는 경우에는 개선 문서 작성을 고려하세요." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker.rst:240 msgid "" -"Similarly, any technical effort (refactoring, major architectural change) " -"that will impact a large section of the development community should also be " -"communicated widely. The Enhancement process is suited for this even if it " -"will have zero impact on the typical user or operator." 
+"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -"마찬가지로 개발 커뮤니티의 많은 부분에 영향을 미치는 기술적 노력(리팩토링, 주" -"요 아키텍처 변경)도 널리 알려야 합니다. 개선 프로세스는 일반 사용자나 운영자" -"에게 전혀 영향을 미치지 않더라도 이를 위해 적합합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 +msgid "Dockerfile.superexec" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For small changes and additions, going through the Enhancement process would " -"be time-consuming and unnecessary. This includes, for example, adding new " -"Federated Learning algorithms, as these only add features without changing " -"how Flower works or is used." +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"작은 변경 및 추가의 경우, 개선 프로세스를 거치는 것은 시간이 많이 걸리고 " -"불필요합니다. 예를 들어, 새로운 연합 학습 알고리즘을 추가하는 것은 Flower의 " -"작동 방식이나 사용 방식을 변경하지 않고 기능만 추가하는 것이기 때문입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by members " -"of the community." +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"기능 개선은 이미 구현할 수 있는 경로가 마련되어 있고 커뮤니티 구성원들이 지지" -"하는 것이므로 기능 요청과는 다릅니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for reference " -"— the Enhancement Doc." +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" msgstr "" -"개선 사항은 정의된 템플릿과 참조용으로 Enhancement Doc.를 검토하고 저장하는 " -"워크플로우를 따르는 Markdown 파일에 캡처됩니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "Enhancement Doc 템플릿" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:277 msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "각 개선 사항 문서는 다음과 같은 구조의 Markdown 파일로 제공됩니다" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "Metadata ([아래 설명](#metadata) YAML preamble 형식)" +#: ../../source/docker/tutorial-quickstart-docker.rst:285 +msgid "Start the SuperExec container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "Title (metadata와 같게)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "Table of Contents (필요시)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "Notes/Constraints/Caveats (선택 사항)" +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "Design Details (선택 사항)" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "``flwr_supernode:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "졸업 기준" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "업그레이드/다운그레이드 전략(해당되는 경우)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "참고로 이 문서는 위의 구조를 따릅니다." +#: ../../source/docker/tutorial-quickstart-docker.rst:315 +msgid "Step 6: Run the Quickstart Project" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "Metadata" +#: ../../source/docker/tutorial-quickstart-docker.rst:317 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 -msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement " -"Doc + 1. With this number, it becomes easy to reference other proposals." +#: ../../source/docker/tutorial-quickstart-docker.rst:326 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -"**피드 번호** (필수) 마지막 Flower Enhancement 문서의 `피드 번호` + 1. 
이 번" -"호를 사용하면 다른 제안을 쉽게 참조할 수 있습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**제목** (필수) 제안서의 제목을 평이한 언어로 입력합니다." +#: ../../source/docker/tutorial-quickstart-docker.rst:332 +msgid "Follow the SuperExec logs to track the execution of the run:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 -msgid "" -"**status** (Required) The current status of the proposal. See [workflow]" -"(#workflow) for the possible states." +#: ../../source/docker/tutorial-quickstart-docker.rst:339 +msgid "Step 7: Update the Application" msgstr "" -"**상태** (필수) 제안의 현재 상태입니다. 가능한 상태는 [워크플로](#워크플로)" -"를 참조하세요." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst:341 msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply the " -"GitHub ID." -msgstr "**저자** (필수) 제안서의 작성자 목록입니다. 간단히 GitHub ID입니다." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first submitted " -"in a PR." -msgstr "**생성 날짜** (필수) PR에서 제안서를 처음 제출한 날짜입니다." +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +msgid "quickstart_docker/task.py" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +#: ../../source/docker/tutorial-quickstart-docker.rst:351 +#, fuzzy +msgid "Stop the current ClientApp containers:" +msgstr "현재 클라이언트 속성입니다." + +#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "기본 이미지 빌드" + +#: ../../source/docker/tutorial-quickstart-docker.rst:363 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -"**마지막 업데이트** (선택 사항) 제안서가 마지막으로 크게 변경된 날짜입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 -msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to this " -"one." -msgstr "**함께 보기** (선택 사항) 이 제안과 관련된 다른 제안 목록입니다." +#: ../../source/docker/tutorial-quickstart-docker.rst:378 +msgid "Run the updated project:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**대체** (선택 사항) 이 제안이 대체하는 제안 목록입니다." +#: ../../source/docker/tutorial-quickstart-docker.rst:385 +msgid "Step 8: Clean Up" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "" -"**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "**대체됨** (선택 사항) 이 제안이 대체하는 제안의 목록입니다." +#: ../../source/docker/tutorial-quickstart-docker.rst:387 +msgid "Remove the containers and the bridge network:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "워크플로우" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:399 +#, fuzzy +msgid "Where to Go Next" +msgstr "시작 위치" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 -msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. 
As such, it needs a champion, usually the author, " -"who shepherds the enhancement. This person also has to find committers to " -"Flower willing to review the proposal." +#: ../../source/docker/tutorial-quickstart-docker.rst:401 +msgid ":doc:`enable-tls`" msgstr "" -"개선 사항을 구성하는 아이디어는 이미 커뮤니티에서 논의되었거나 제안된 적이 있" -"어야 합니다. 따라서 개선 사항을 주도하는 사(보통 작성자)이 필요합니다. 이 사" -"람은 또한 제안을 검토할 의향이 있는 Flower 커미터를 찾아야 합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement Doc " -"number, to `enhancements`. All enhancements start in `provisional` state as " -"part of a pull request. Discussions are done as part of the pull request " -"review." +#: ../../source/docker/tutorial-quickstart-docker.rst:402 +msgid ":doc:`persist-superlink-state`" msgstr "" -"새 개선 사항은 `NNNN-YYYYMMDD-enhancement-title.md` 형식의 파일 이름으로 체크" -"인되며, `NNNN`은 Flower 개선 문서 번호이고 `enhancements`에 해당합니다. 모든 " -"개선 사항은 pull request의 일부로 `잠정` 상태에서 시작됩니다. 토론은 pull " -"request 검토의 일부로 이루어집니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 -msgid "" -"Once an enhancement has been reviewed and approved, its status is changed to " -"`implementable`. The actual implementation is then done in separate pull " -"requests. These pull requests should mention the respective enhancement as " -"part of their description. After the implementation is done, the proposal " -"status is changed to `implemented`." +#: ../../source/docker/tutorial-quickstart-docker.rst:403 +msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" -"개선 사항이 검토 및 승인되면 상태가 '구현 가능'으로 변경됩니다. 그런 다음 실" -"제 구현은 별도의 pull requests를 통해 이루어집니다. 이러한 pull requests는 설" -"명의 일부로 해당 개선 사항을 언급해야 합니다. 구현이 완료되면 제안 상태는 '구" -"현됨'으로 변경됩니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +#, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"Under certain conditions, other states are possible. An Enhancement has the " -"following states:" +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -"특정 조건에서는 다른 상태도 가능합니다. 개선에는 다음과 같은 상태가 있습니다:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed out " -"and actively defined and discussed." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." msgstr "" -"'잠정적': 개선 사항이 제안되어 활발히 정의되고 있습니다. 제안이 구체화되고 활" -"발하게 정의 및 논의되는 동안의 시작 단계입니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`구현 가능`: 개선 사항이 검토 및 승인되었습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "`구현됨`: 개선 사항이 구현되었으며 더 이상 활발히 변경되지 않습니다." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +msgid "Clone the Docker Compose ``complete`` directory:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 msgid "" -"`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "'지연됨': 개선 사항이 제안되었지만 아직 활발히 작업 중이 아닙니다." +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement is " -"not moving forward." +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -"`거부됨`: 작성자와 검토자는 이 개선 사항을 더 이상 진행하지 않기로 결정했습니" -"다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`철회`: 작성자가 개선 사항을 철회했습니다." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Flower SuperNode를 실행합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "'대체됨': 개선 사항이 새로운 개선 사항으로 대체되었습니다." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 +msgid "" +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 msgid "" -"Adding an additional process to the ones already provided by GitHub (Issues " -"and Pull Requests) adds more complexity and can be a barrier for potential " -"first-time contributors." +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -"GitHub에서 이미 제공하는 프로세스(이슈 및 Pull Requests)에 추가 프로세스를 추" -"가하면 더 복잡해지고 잠재적인 처음인 기여자에게는 장벽이 될 수 있습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden for " -"non-native English speakers." +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -"현재 기능 이슈 템플릿에서 요구되는 한 문장 설명 이상으로 제안서 템플릿을 확장" -"하는 것은 영어가 모국어가 아닌 사용자에게는 큰 부담이 될 수 있습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "GitHub 이슈" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +#, fuzzy +msgid "``docker compose``: The Docker command to run the Docker Compose tool." +msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. 
One could " -"use, for example, tags, to differentiate and filter them from other issues. " -"The main issue is in discussing and reviewing an enhancement: GitHub issues " -"only have a single thread for comments. Enhancements usually have multiple " -"threads of discussion at the same time for various parts of the doc. " -"Managing these multiple discussions can be confusing when using GitHub " -"Issues." -msgstr "" -"이러한 종류의 개선을 위해 GitHub 이슈를 사용하면 가능합니다. 예를 들어 태그" -"를 사용하여 다른 이슈와 구별하고 필터링할 수 있습니다. 주요 이슈는 개선 사항" -"에 대해 토론하고 검토하는 것입니다: GitHub 이슈에는 댓글 스레드가 하나만 있습" -"니다. 개선 사항에는 일반적으로 문서의 여러 부분에 대해 동시에 여러 개의 토론 " -"스레드가 있습니다. GitHub 이슈를 사용할 때 이러한 여러 토론을 관리하면 혼란스" -"러울 수 있습니다." +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "Google 문서 도구" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +msgid "" +"``--build``: Rebuild the images for each service if they don't already " +"exist." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs " -"are hosted outside the project, their discoverability by the community needs " -"to be taken care of. A list of links to all proposals has to be managed and " -"made available for the community. Compared to shipping proposals as part of " -"Flower's repository, the potential for missing links is much higher." +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -"Google 문서는 여러 스레드의 토론을 허용합니다. 하지만 Google 문서는 프로젝트 " -"외부에서 호스팅되므로 커뮤니티에서 검색할 수 있도록 관리해야 합니다. 모든 제" -"안에 대한 링크 목록을 관리하고 커뮤니티에 제공해야 합니다. Flower 저장소의 일" -"부로 제안서를 보낼 때와 비교하면 링크가 누락될 가능성이 훨씬 더 높습니다." -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Flower 개선 문서" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +msgid "Step 3: Run the Quickstart Project" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "종합 평가 결과" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation results, " -"but it enables the user to fully customize result aggregation." +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -"Flower 서버는 평가 결과를 집계하는 방법을 규정하고 있지 않지만 사용자가 결과 " -"집계를 완전히 사용자 지정할 수 있습니다." 
-#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "사용자 지정 평가 결과 집계" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 -msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate " -"custom evaluation results coming from individual clients. Clients can return " -"custom metrics to the server by returning a dictionary:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +msgid "quickstart-compose/pyproject.toml" msgstr "" -"동일한 :code:`Strategy`-사용자 지정 방식을 사용하여 개별 클라이언트로부터 오" -"는 사용자 지정 평가 결과를 집계할 수 있습니다. 클라이언트는 dictionary를 반환" -"하여 사용자 지정 지표를 서버에 반환할 수 있습니다:" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 -msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +msgid "Execute the command to run the quickstart example:" msgstr "" -"그런 다음 서버는 사용자 지정 전략을 사용하여 이러한 dictionaries에서 제공하" -"는 메트릭을 집계할 수 있습니다:" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" -msgstr "SuperNodes 인증하기" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 -msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use to " -"verify the identities of each SuperNode connecting to a SuperLink. Flower " -"node authentication works similar to how GitHub SSH authentication works:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +msgid "Step 4: Update the Application" msgstr "" -"Flower는 SuperLink에 연결하는 각 SuperNodes의 신원을 확인하는 데 사용할 수 있" -"는 인증된 SuperNodes에 대한 기본 지원을 제공합니다. Flower 노드 인증은 " -"GitHub SSH 인증 방식과 유사하게 작동합니다:" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" -msgstr "SuperLink(서버)는 알려진 (클라이언트) 노드 공개키 목록을 저장합니다" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "In the next step, change the application code." 
+msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared secret" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -"SuperNode와 SuperLink는 ECDH를 사용하여 독립적으로 공유된 비밀을 도출합니다" -#: ../../source/how-to-authenticate-supernodes.rst:9 -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -"비밀 공유는 SuperNode에서 SuperLink로 토큰으로 전송된 메시지의 HMAC 값을 계산" -"하는 데 사용됩니다" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" -msgstr "SuperLink가 토큰을 확인합니다" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +#, fuzzy +msgid "Rebuild and restart the services." +msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/how-to-authenticate-supernodes.rst:12 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 msgid "" -"We recommend you to check out the complete `code example `_ demonstrating " -"federated learning with Flower in an authenticated setting." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -"인증된 환경에서 Flower로 연합 학습을 시연하는 전체 '코드 예제 `_를 확인하는 것이 " -"좋습니다." -#: ../../source/how-to-authenticate-supernodes.rst:15 -msgid "" -"This guide covers a preview feature that might change in future versions of " -"Flower." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -"이 가이드에서는 향후 버전의 Flower에서 변경될 수 있는 미리보기 기능에 대해 설" -"명합니다." -#: ../../source/how-to-authenticate-supernodes.rst:18 -msgid "" -"For increased security, node authentication can only be used when encrypted " -"connections (SSL/TLS) are enabled." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -"보안을 강화하기 위해 노드 인증은 암호화된 연결(SSL/TLS)을 사용하도록 설정한 " -"경우에만 사용할 수 있습니다." -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" -msgstr ":code:`SuperLink`에서 노드 인증 활성화" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +msgid "Run the updated quickstart example:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 -msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can find " -"the complete guide `here `_. After configuring secure connections, you can enable " -"client authentication in a long-running Flower :code:`SuperLink`. Use the " -"following terminal command to start a Flower :code:`SuperNode` that has both " -"secure connections and node authentication enabled:" -msgstr "" -"노드 인증을 활성화하려면 먼저 SuperLink<>SuperNode 통신을 보호하기 위해 SSL/" -"TLS 연결을 구성해야 합니다. 전체 가이드는 `여기 `_에서 확인할 수 있습니다. 보안 " -"연결을 구성한 후, 장기 실행하는 Flower :code:`SuperLink`에서 클라이언트 인증" -"을 활성화할 수 있습니다. 
다음 터미널 명령을 사용하여 보안 연결과 노드 인증이 " -"모두 활성화된 Flower :code:`SuperNode`를 시작하세요:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 -msgid "Let's break down the authentication flags:" -msgstr "인증 플래그를 세분화해 보겠습니다:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +msgid "Step 5: Persisting the SuperLink State" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV file " -"storing all known node public keys. You need to store all known node public " -"keys that are allowed to participate in a federation in one CSV file (:code:" -"`.csv`)." +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -"첫 번째 플래그 :code:`--auth-list-public-keys`는 알려진 모든 노드 공개키를 저" -"장하는 CSV 파일의 경로를 기대합니다. federation에 참여하도록 허용된 모든 알려" -"진 노드 공개 키를 하나의 CSV 파일(:code:`.csv`)에 저장해야 합니다." -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two known " -"node public keys." +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -"알려진 노드 공개키를 저장하는 유효한 CSV 파일은 쉼표로 구분하고 주석 없이 " -"OpenSSH 형식으로 키를 나열해야 합니다. 예를 들어, 두 개의 알려진 노드 공개키" -"가 포함된 CSV 파일이 포함된 코드 샘플을 참조하세요." -#: ../../source/how-to-authenticate-supernodes.rst:44 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code:`--" -"auth-superlink-public-key` expect paths to the server's private and public " -"keys. For development purposes, you can generate a private and public key " -"pair using :code:`ssh-keygen -t ecdsa -b 384`." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -"두 번째 및 세 번째 플래그 :code:`--auth-superlink-private-key` 및 :code:`--" -"auth-superlink-public-key`는 서버의 개인 및 공개 키의 경로를 예상합니다. 개" -"발 목적으로 :code:`ssh-keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍" -"을 생성할 수 있습니다." -#: ../../source/how-to-authenticate-supernodes.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +msgid "Run the command:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of known " -"nodes, you need to shut the server down, edit the CSV file, and start the " -"server again. Support for dynamically changing the set of known nodes is on " -"the roadmap to be released in Flower 1.10 (ETA: June)." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -"Flower 1.9에서는 알려진 노드 공개키를 SuperLink에 동적으로 제거, 편집 또는 추" -"가하는 기능이 지원되지 않습니다. 알려진 노드 집합을 변경하려면 서버를 종료하" -"고 CSV 파일을 편집한 다음 서버를 다시 시작해야 합니다. 
알려진 노드 집합을 동" -"적으로 변경하는 기능은 Flower 1.10(출시 예정일: 6월)에서 로드맵에 포함되어 있" -"습니다." -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" -msgstr ":code:`SuperNode`에서 노드 인증을 활성화합니다" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client (:code:" -"`SuperNode`). Use the following terminal command to start an authenticated :" -"code:`SuperNode`:" +"Docker merges Compose files according to `merging rules " +"`_." msgstr "" -"장기 실행 중인 Flower 서버(:code:`SuperLink`)와 마찬가지로, 장기 실행 중인 " -"Flower 클라이언트(:code:`SuperNode`)에서도 노드 인증을 쉽게 활성화할 수 있습" -"니다. 다음 터미널 명령을 사용하여 인증된 :code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-authenticate-supernodes.rst:66 -msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the node's " -"private key file and the :code:`--auth-supernode-public-key` flag expects a " -"path to the node's public key file. For development purposes, you can " -"generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b " -"384`." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -":code:`--auth-supernode-private-key` 플래그는 노드의 개인 키 파일 경로를, :" -"code:`--auth-supernode-public-key` 플래그는 노드의 공개 키 파일 경로를 예상합" -"니다. 개발 목적으로 :code:`ssh-keygen -t ecdsa -b 384`를 사용하여 개인 및 공" -"개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "Security notice" -msgstr "보안 공지" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +msgid "Check the content of the ``state`` directory:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 msgid "" -"The system's security relies on the credentials of the SuperLink and each " -"SuperNode. Therefore, it is imperative to safeguard and safely store the " -"credentials to avoid security risks such as Public Key Infrastructure (PKI) " -"impersonation attacks. The node authentication mechanism also involves human " -"interaction, so please ensure that all of the communication is done in a " -"secure manner, using trusted communication methods." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -"시스템의 보안은 SuperLink와 각SuperNode의 자격 증명에 의존합니다. 따라서 공개" -"키 기반구조(PKI) 사칭 공격과 같은 보안 위험을 피하기 위해 자격 증명을 보호하" -"고 안전하게 보관하는 것이 필수적입니다. 노드 인증 메커니즘에는 사람의 상호 작" -"용도 포함되므로 모든 통신이 신뢰할 수 있는 통신 방법을 사용하여 안전한 방식으" -"로 이루어지도록 하세요." 
-#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "결론" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +msgid "Step 6: Run Flower with TLS" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 msgid "" -"You should now have learned how to start a long-running Flower server (:code:" -"`SuperLink`) and client (:code:`SuperNode`) with node authentication " -"enabled. You should also know the significance of the private key and store " -"it safely to minimize security risks." +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." msgstr "" -"이제 노드 인증이 활성화된 상태에서 장기간 실행되는 Flower 서버(:code:" -"`SuperLink`)와 클라이언트(:code:`SuperNode`)를 시작하는 방법을 배웠을 것입니" -"다. 또한 보안 위험을 최소화하기 위해 개인키의 중요성을 알고 안전하게 보관해" -"야 합니다." -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" -msgstr "클라이언트 구성" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 +msgid "These certificates should be used only for development purposes." +msgstr "" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are, " -"for example, a popular way to control client-side hyperparameters from the " -"server." +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." msgstr "" -"모델 파라미터와 함께 Flower는 설정 값을 클라이언트에 전송할 수 있습니다. 구" -"성 값은 다양한 용도로 사용할 수 있습니다. 예를 들어 서버에서 클라이언트 측 하" -"이퍼파라미터를 제어하는 데 널리 사용되는 방법입니다." -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "구성 값" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +msgid "Restart the services with TLS enabled:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:9 -msgid "" -"Configuration values are represented as a dictionary with ``str`` keys and " -"values of type ``bool``, ``bytes``, ``double`` (64-bit precision float), " -"``int``, or ``str`` (or equivalent types in different languages). Here is an " -"example of a configuration dictionary in Python:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +msgid "Step 7: Add another SuperNode" msgstr "" -"구성 값은 ``str`` 키와 ``bool``, ``bytes``, ``double``(64비트 정밀도 정수), " -"``int`` 또는 ``str``(또는 다른 언어의 동등한 유형) 유형의 값으로 구성된 사전" -"으로 표현됩니다. 다음은 Python의 구성 사전 예제입니다:" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client using " -"gRPC, and then deserializes them back to Python dictionaries." +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -"Flower는 이러한 구성 dictionaries(또는 줄여서 *config dict*)를 ProtoBuf 표현" -"으로 직렬화하고, gRPC를 사용하여 클라이언트로 전송한 다음 다시 Python " -"dictionaries로 역직렬화합니다." 
-#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 msgid "" -"Currently, there is no support for directly sending collection types (e.g., " -"``Set``, ``List``, ``Map``) as values in configuration dictionaries. There " -"are several workarounds to send collections as values by converting them to " -"one of the supported value types (and converting them back on the client-" -"side)." +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -"현재 구성 사전에서 컬렉션 유형(예: ``Set``, ``List``, ``Map``)을 값으로 직접 " -"전송하는 기능은 지원되지 않습니다. 컬렉션을 지원되는 값 유형 중 하나로 변환" -"한 다음 클라이언트 측에서 다시 변환하여 값으로 보내는 몇 가지 해결 방법이 있" -"습니다." -#: ../../source/how-to-configure-clients.rst:26 -msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and " -"then convert the JSON string back to a list of floating-point numbers on the " -"client." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +msgid "In ``compose.yml``, add the following:" msgstr "" -"예를 들어 부동 소수점 숫자 목록을 JSON 문자열로 변환한 다음 구성 dictionary" -"을 사용하여 JSON 문자열을 전송한 다음 클라이언트에서 다시 부동 소수점 숫자 목" -"록으로 변환할 수 있습니다." -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "기본 제공 전략을 통한 구성" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +msgid "compose.yml" +msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" -"The easiest way to send configuration values to clients is to use a built-in " -"strategy like :code:`FedAvg`. Built-in strategies support so-called " -"configuration functions. A configuration function is a function that the " -"built-in strategy calls to get the configuration dictionary for the current " -"round. It then forwards the configuration dictionary to all the clients " -"selected during that round." +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -"클라이언트에 구성 값을 보내는 가장 쉬운 방법은 :code:`FedAvg`와 같은 기본 제" -"공 전략을 사용하는 것입니다. 기본 제공 전략은 소위 구성 함수를 지원합니다. 구" -"성 함수는 내장 전략이 현재 단계의 구성 사전을 가져오기 위해 호출하는 함수입니" -"다. 그런 다음 해당 단계 동안 선택된 모든 클라이언트에 구성 사전을 전달합니다." -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of federated " -"learning, and (c) the number of epochs to train on the client-side. Our " -"configuration function could look like this:" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -"간단한 예부터 시작하겠습니다. (a) 클라이언트가 사용해야 하는 배치 크기, (b) " -"현재 글로벌 연합 라운드, (c) 클라이언트 측에서 학습할 에포크 수를 전송하고 " -"싶다고 가정해 보겠습니다. 
구성 함수는 다음과 같습니다:" -#: ../../source/how-to-configure-clients.rst:47 -msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter :code:" -"`on_fit_config_fn`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -"기본 제공 전략이 이 함수를 사용하도록 하려면 초기화 중에 매개 변수 :code:" -"`on_fit_config_fn`을 사용하여 ``FedAvg``에 이 함수를 전달하면 됩니다:" -#: ../../source/how-to-configure-clients.rst:56 -msgid "" -"One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "클라이언트 측에서는 ``fit``으로 구성 dictionary을 받습니다:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +msgid "with-tls.yml" +msgstr "" -#: ../../source/how-to-configure-clients.rst:67 -msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to " -"send different configuration values to `evaluate` (for example, to use a " -"different batch size)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -"평가를 구성하는 `on_evaluate_config_fn`도 있으며, 같은 방식으로 작동합니다. " -"다른 배치 크기를 사용하기 위해 다른 구성 값을 `evaluate`로 보내려고 할 수 있" -"기 때문에 이 함수는 별도의 함수입니다." -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" -"The built-in strategies call this function every round (that is, every time " -"`Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling " -"`on_evaluate_config_fn` every round allows us to vary/change the config dict " -"over consecutive rounds. If we wanted to implement a hyperparameter " -"schedule, for example, to increase the number of local epochs during later " -"rounds, we could do the following:" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -"기본 제공 전략은 매 라운드마다 이 함수를 호출합니다(즉, `Strategy." -"configure_fit` 또는 `Strategy.configure_evaluate`가 실행될 때마다). 매 라운드" -"마다 `on_evaluate_config_fn`을 호출하면 연속된 라운드에서 config dict를 변경/" -"변경할 수 있습니다. 예를 들어 이후 라운드에서 로컬 에포크 수를 늘리기 위해 하" -"이퍼파라미터 일정을 구현하려면 다음과 같이 할 수 있습니다:" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr ":code:`FedAvg` 전략은 이 함수를 *매 라운드마다* 호출합니다." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "개별 클라이언트 구성" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +msgid "with-state.yml" +msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 +#, fuzzy +msgid "Restart the services:" +msgstr "이미 *서버*를 시작할 수 있습니다:" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +msgid "Step 9: Merge Multiple Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"You can merge multiple Compose files into a single file. 
For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -"경우에 따라 다른 구성 값을 다른 클라이언트에 보내야 하는 경우도 있습니다." -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" -"This can be achieved by customizing an existing strategy or by :doc:" -"`implementing a custom strategy from scratch `. " -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the config " -"dict of a *single client* (only the first client in the list, the other " -"clients in this round to not receive this \"special\" config value):" +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." msgstr "" -"이는 기존 전략을 사용자 지정하거나 :doc:`implementing a custom strategy from " -"scratch `를 통해 수행할 수 있습니다. 다음은 사용" -"자 지정 ``\"hello\"'를 추가하여 :code:`FedAvg`를 사용자 지정하는 무의미한 예" -"입니다: \"world\"`` 구성 키/값 쌍을 *단일 클라이언트*의 config dict에 추가합" -"니다(목록의 첫 번째 클라이언트만, 이 라운드의 다른 클라이언트는 이 \"특별한" -"\" 구성 값을 수신하지 않음):" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "로깅 구성" - -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default following a " -"standard message format:" -msgstr "" -"Flower 로거는 federated 학습 워크로드에서 발생하는 모든 핵심 이벤트를 추적합" -"니다. 기본적으로 표준 메시지 형식에 따라 정보를 표시합니다:" - -#: ../../source/how-to-configure-logging.rst:13 -msgid "" -"containing relevant information including: log message level (e.g. :code:" -"`INFO`, :code:`DEBUG`), a timestamp, the line where the logging took place " -"from, as well as the log message itself. In this way, the logger would " -"typically display information on your terminal as follows:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +msgid "Step 10: Clean Up" msgstr "" -"로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), 타임스탬프, 로깅이 발생한 " -"줄, 로그 메시지 자체 등 관련 정보를 포함합니다. 이러한 방식으로 로거는 일반적" -"으로 다음과 같은 정보를 터미널에 표시합니다:" - -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" -msgstr "파일에 로그 저장" -#: ../../source/how-to-configure-logging.rst:36 -msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when using " -"the :code:`VirtualClientEngine` (i.e. when you do :code:`fl.simulation." -"start_simulation`). In some situations you might want to save this log to " -"disk. You can do so by calling the `fl.common.logger.configure() `_ function. " -"For example:" -msgstr "" -"기본적으로 Flower 로그는 Federated 학습 워크로드를 실행하는 터미널에 출력됩니" -"다. 이는 gRPC 기반 페더레이션(즉,:code:`fl.simulation.start_simulation`를 실" -"행하는 경우)과 :code:`VirtualClientEngine`을 사용하는 경우(즉, :코드:`fl." -"simulation.start_simulation`을 실행하는 경우) 모두에 적용됩니다. 경우에 따라 " -"이 로그를 디스크에 저장하고 싶을 수도 있습니다. 이 경우 `fl.common.logger." -"configure() `_ 함수를 호출하여 저장할 수 있습니다. 예를 들어:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +#, fuzzy +msgid "Remove all services and volumes:" +msgstr "R에서 모든 항목을 제거합니다." -#: ../../source/how-to-configure-logging.rst:53 -msgid "" -"With the above, Flower will record the log you see on your terminal to :code:" -"`log.txt`. 
This file will be created in the same directory as were you are " -"running the code from. If we inspect we see the log above is also recorded " -"but prefixing with :code:`identifier` each line:" -msgstr "" -"위와 같이 하면 Flower는 터미널에 표시되는 로그를 :code:`log.txt`에 기록합니" -"다. 이 파일은 코드를 실행한 디렉터리와 동일한 디렉터리에 생성됩니다. 검사해보" -"면 위의 로그도 기록되지만 각 줄 앞에 :code:`identifier` 접두사가 붙는 것을 확" -"인할 수 있습니다:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "빠른 시작 튜토리얼" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "나만의 메시지 기록" +#: ../../source/docker/use-a-different-version.rst:2 +#, fuzzy +msgid "Use a Different Flower Version" +msgstr "다른 Flower 버전 사용" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"You might expand the information shown by default with the Flower logger by " -"adding more messages relevant to your application. You can achieve this " -"easily as follows." +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -"애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에 기본적으로 표시되는 " -"정보를 확장할 수 있습니다. 다음과 같이 쉽게 추가할 수 있습니다." +"다른 버전의 Flower를 사용하려면 태그를 변경하여 사용할 수 있습니다(예: Flower nightly). 사용 가능한 모든 " +"버전은 `Docker Hub `__에 있습니다." -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/use-a-different-version.rst:9 +#, fuzzy msgid "" -"In this way your logger will show, in addition to the default messages, the " -"ones introduced by the clients as specified above." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -"이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메" -"시지가 표시됩니다." - -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" -msgstr "원격 서비스에 로그인" - -#: ../../source/how-to-configure-logging.rst:130 -msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a " -"host to which logs can be pushed (via :code:`POST`) through a native Python :" -"code:`logging.handler.HTTPHandler`. This is a particularly useful feature " -"in :code:`gRPC`-based Federated Learning workloads where otherwise gathering " -"logs from all entities (i.e. the server and the clients) might be " -"cumbersome. Note that in Flower simulation, the server automatically " -"displays all logs. You can still specify a :code:`HTTPHandler` should you " -"wish to backup or analyze the logs somewhere else." -msgstr "" -"또한 :code:`fl.common.logger.configure` 함수를 사용하면 네이티브 Python :" -"code:`logging.handler.HTTPHandler`를 통해 로그를 푸시할 수 있는 호스트를 지정" -"할 수 있습니다(:code:`POST`를 통해). 이는 모든 엔티티(예: 서버 및 클라이언트)" -"에서 로그를 수집하는 것이 번거로울 수 있는 :code:`gRPC` 기반 Federated 학습 " -"워크로드에서 특히 유용한 기능입니다. Flower 시뮬레이션에서는 서버가 모든 로그" -"를 자동으로 표시합니다. 로그를 다른 곳에 백업하거나 분석하려는 경우 :code:" -"`HTTPHandler`를 지정할 수 있습니다." +"SuperNode Docker 이미지는 현재 1.9.0 야간 릴리스에서만 작동합니다. 안정 버전은 Flower 1.9.0(안정)이 " +"출시되면 사용할 수 있습니다(예상 출시일: 5월). SuperNode 야간 이미지는 같은 날 릴리스된 해당 SuperLink 및 " +"서버앱 야간 이미지와 페어링되어야 합니다. 버전이 동기화되도록 하려면 ``nightly`` 대신 " +"``1.9.0.dev20240501``과 같은 구체적인 태그를 사용하는 것이 좋습니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" -msgstr "SSL 연결 사용" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "예시: PyTorch에서 FedBN - 중앙 집중식에서 연합식으로" -#: ../../source/how-to-enable-ssl-connections.rst:4 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 msgid "" -"This guide describes how to a SSL-enabled secure Flower server (:code:" -"`SuperLink`) can be started and how a Flower client (:code:`SuperNode`) can " -"establish a secure connections to it." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" -"이 가이드에서는 SSL을 지원하는 보안 Flower 서버(:코드:`SuperLink`)를 시작하" -"는 방법과 Flower 클라이언트(:코드:`SuperNode`)가 이 서버에 보안 연결을 설정하" -"는 방법을 설명합니다." +"이 튜토리얼에서는 non-iid data를 위해 설계된 federated 훈련 전략인 `FedBN " +"`_으로 기존 머신러닝 워크로드의 federated 버전을 구축하기 " +"위해 Flower를 사용하는 방법을 보여드립니다. 우리는 PyTorch를 사용하여 CIFAR-10 데이터 세트에서 컨볼루션 " +"신경망(일괄 정규화 레이어 포함)을 훈련하고 있습니다. FedBN을 적용할 때, :doc:`예제: 파이토치 -중앙 집중식에서 " +"연합식으로 ` 와 비교했을 때 몇 가지 사항만 " +"변경 하면 됩니다." -#: ../../source/how-to-enable-ssl-connections.rst:7 -msgid "" -"A complete code example demonstrating a secure connection can be found `here " -"`_." -msgstr "" -"보안 연결을 보여주는 전체 코드 예제는 '여기 `_'에서 확인할 수 있습니다." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +msgid "Centralized Training" +msgstr "중앙 집중식 훈련" -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 msgid "" -"The code example comes with a :code:`README.md` file which explains how to " -"start it. Although it is already SSL-enabled, it might be less descriptive " -"on how it does so. Stick to this guide for a deeper introduction to the " -"topic." +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" -"코드 예제에는 시작 방법을 설명하는 :code:`README.md` 파일이 함께 제공됩니다. " -"이미 SSL을 사용하도록 설정되어 있지만 그 방법에 대한 설명이 부족할 수 있습니" -"다. 이 가이드를 참고하여 이 주제에 대해 자세히 알아보세요." - -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "인증서" +"모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 연합식으로 `를 기반으로 수정합니다. :code:`cifar.py`라는 파일을 수정하기만 하면 되며, 수정된 부분은 " +"아래와 같습니다:" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate " -"self-signed certificates. As this can become quite complex we are going to " -"ask you to run the script in :code:`examples/advanced-tensorflow/" -"certificates/generate.sh` with the following command sequence:" -msgstr "" -"SSL 사용 연결을 사용하려면 서버와 클라이언트에 인증서를 전달해야 합니다. 이 " -"가이드에서는 자체 서명된 인증서를 생성하겠습니다. 
이 과정은 상당히 복잡할 수 " -"있으므로 다음 명령 시퀀스를 사용하여 :code:`examples/advanced-tensorflow/" -"certificates/generate.sh`에서 스크립트를 실행하도록 요청하겠습니다:" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." +msgstr "Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니다." -#: ../../source/how-to-enable-ssl-connections.rst:29 -msgid "" -"This will generate the certificates in :code:`examples/advanced-tensorflow/." -"cache/certificates`." -msgstr "" -"이렇게 하면 :code:`examples/advanced-tensorflow/.cache/certificates`에 인증서" -"가 생성됩니다." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +msgid "You can now run your machine learning workload:" +msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 msgid "" -"The approach for generating SSL certificates in the context of this example " -"can serve as an inspiration and starting point, but it should not be used as " -"a reference for production environments. Please refer to other sources " -"regarding the issue of correctly generating certificates for production " -"environments. For non-critical prototyping or research projects, it might be " -"sufficient to use the self-signed certificates generated using the scripts " -"mentioned in this guide." +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" -"이 예의 맥락에서 SSL 인증서를 생성하는 접근 방식은 영감과 출발점이 될 수 있지" -"만 프로덕션 환경에 대한 참조로 사용해서는 안 됩니다. 프로덕션 환경용 인증서" -"를 올바르게 생성하는 문제에 대해서는 다른 출처를 참조하세요. 중요하지 않은 프" -"로토타이핑 또는 연구 프로젝트의 경우, 이 가이드에 언급된 스크립트를 사용하여 " -"생성한 자체 서명 인증서를 사용하는 것으로 충분할 수 있습니다." +"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 사용하여 " +"FedBN 내에서 하나의 서버와 두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 보겠습니다." -#: ../../source/how-to-enable-ssl-connections.rst:39 -msgid "Server (SuperLink)" -msgstr "서버(SuperLink)" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +msgid "Federated Training" +msgstr "연합 훈련" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses " -"the previously generated certificates:" -msgstr "" -"다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 서버(SuperLink)" -"를 시작합니다:" +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." +msgstr "" +":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 읽었다면, 다음 부분은 쉽게 따라할 수 있으며 :code:`client.py`의 " +":code:`get_parameters`와 :code:`set_parameters` 함수만 수정해야 합니다. 그렇지 않은 경우 " +":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 먼저 읽어보세요." -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private " -"key." 
+"Our example consists of one *server* and two *clients*. In FedBN, " +":code:`server.py` keeps unchanged, we can start the server directly." msgstr "" -"인증서를 제공할 때 서버는 세 가지 인증서 경로의 튜플을 기대합니다: CA 인증" -"서, 서버 인증서 및 서버 개인 키입니다." +"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. FedBN에서 :code:`server.py`는 변경되지 않고 " +"그대로 유지되므로 서버를 바로 시작할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:54 -msgid "Client (SuperNode)" -msgstr "클라이언트(SuperNode)" - -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 msgid "" -"Use the following terminal command to start a client (SuperNode) that uses " -"the previously generated certificates:" +"Finally, we will revise our *client* logic by changing " +":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " +"we will exclude batch normalization parameters from model parameter list " +"when sending to or receiving from the server." msgstr "" -"다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트" -"(SuperNode)를 시작합니다:" +"마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 " +":code:`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 서버에서 받을 때 모델" +" 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:64 -msgid "" -"When setting :code:`root_certificates`, the client expects a file path to " -"PEM-encoded root certificates." -msgstr "" -"코드:`root_certificates`를 설정하면 클라이언트는 PEM 인코딩된 루트 인증서의 " -"파일 경로를 예상합니다." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 msgid "" -"You should now have learned how to generate self-signed certificates using " -"the given script, start an SSL-enabled server and have a client establish a " -"secure connection to it." +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" msgstr "" -"이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 " -"시작하고, 클라이언트가 보안 연결을 설정하는 방법을 배웠을 것입니다." - -#: ../../source/how-to-enable-ssl-connections.rst:75 -msgid "Additional resources" -msgstr "추가 리소스" - -#: ../../source/how-to-enable-ssl-connections.rst:77 -msgid "" -"These additional sources might be relevant if you would like to dive deeper " -"into the topic of certificates:" -msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" - -#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" -msgstr "'암호화하세요 `_'" - -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" -msgstr "`인증봇 `_" +"를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 확인하세요), (이전에 중앙 집중된) PyTorch 프로젝트가 두 클라이언트에서" +" FedBN으로 연합 학습을 실행하는 것을 확인합니다. 축하합니다!" -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" -msgstr "전략 구현" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 +#: ../../source/tutorial-quickstart-jax.rst:283 +msgid "Next Steps" +msgstr "다음 단계" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 msgid "" -"The strategy abstraction enables implementation of fully custom strategies. 
" -"A strategy is basically the federated learning algorithm that runs on the " -"server. Strategies decide how to sample clients, how to configure clients " -"for training, how to aggregate updates, and how to evaluate models. Flower " -"provides a few built-in strategies which are based on the same API described " -"below." +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -"전략 추상화를 통해 완전한 맞춤형 전략을 구현할 수 있습니다. 전략은 " -"기본적으로 서버에서 실행되는 연합 학습 알고리즘입니다. 전략은 클라이언트를 " -"샘플링하는 방법, 학습을 위해 클라이언트를 구성하는 방법, 업데이트를 집계하는 " -"방법, 모델을 평가하는 방법을 결정합니다. Flower는 아래에 설명된 것과 동일한 " -"API를 기반으로 하는 몇 가지 기본 제공 전략을 제공합니다." +"이 예제의 전체 소스 코드는 '여기 `_'에서 확인할 수 있습니다. 물론 이 예제는 두 " +"클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 " +"주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요?" +" 클라이언트를 더 추가하는 것은 어떨까요?" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr ":code:`Strategy` 추상화" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "예제: 파이토치 - 중앙 집중식에서 연합식으로" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"All strategy implementation are derived from the abstract base class :code:" -"`flwr.server.strategy.Strategy`, both built-in implementations and third " -"party implementations. This means that custom strategy implementations have " -"the exact same capabilities at their disposal as built-in ones." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -"모든 전략 구현은 기본 제공 구현과 타사 구현 모두 추상 기본 클래스인 :code:" -"`flwr.server.strategy.Strategy`에서 파생됩니다. 즉, 사용자 정의 전략 구현은 " -"기본 제공 구현과 완전히 동일한 기능을 사용할 수 있습니다." - -#: ../../source/how-to-implement-strategies.rst:18 -msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" -msgstr "전략 추상화에서는 구현해야 하는 몇 가지 추상적인 메서드를 정의합니다:" +"이 튜토리얼에서는 Flower를 사용해 기존 머신 러닝 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. 여기서는 " +"PyTorch를 사용해 CIFAR-10 데이터 세트에서 컨볼루션 신경망을 훈련합니다. 먼저, 'PyTorch로 딥 러닝 " +"`_ " +"튜토리얼을 기반으로 centralized 학습 접근 방식을 사용하여 이 머신 러닝 작업을 소개합니다. 그런 다음 " +"centralized 훈련 코드를 기반으로 연합 방식 훈련을 실행합니다." -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived from " -"the abstract base class :code:`Strategy`) that implements for the previously " -"shown abstract methods:" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." 
msgstr "" -"새 전략을 생성한다는 것은 이전에 표시된 추상 메서드에 대해 구현하는 새로운 :" -"code:`class`(추상 기본 클래스 :code:`Strategy`에서 파생됨)를 구현하는 것을 의" -"미합니다:" - -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Flower 서버는 다음 순서로 이러한 메서드를 호출합니다:" - -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." -msgstr "다음 섹션에서는 이러한 각 방법에 대해 자세히 설명합니다." - -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr ":code:`initialize_parameters` 메서드" +"중앙 집중식 CNN 트레이닝 코드에 대한 간략한 설명부터 시작하겠습니다. 무슨 일이 일어나고 있는지 더 자세히 설명하려면 공식 " +"`PyTorch 튜토리얼 " +"`_을 " +"참조하세요." -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning of " -"an execution. It is responsible for providing the initial global model " -"parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"Let's create a new file called :code:`cifar.py` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as :code:`torch` and :code:`torchvision`) need " +"to be imported. You can see that we do not import any package for " +"federated learning. You can keep all these imports as they are even when " +"we add the federated learning components at a later point." msgstr "" -"code:`initialize_parameters`는 실행을 처음 시작할 때 한 번만 호출됩니다. 이 " -"함수는 초기 전역 모델 파라미터를 직렬화된 형식(즉, :code:`Parameters` 객체)으" -"로 제공하는 역할을 합니다." +"CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한 모든 구성 요소가 포함된 :code:`cifar.py`라는 새 파일을 " +"생성해 보겠습니다. 먼저, 필요한 모든 패키지(예: :code:`torch` 및 :code:`torchvision`)를 가져와야 " +"합니다. 연합 학습을 위한 패키지를 가져오지 않는 것을 확인 할 수 있습니. 나중에 연합 학습 구성 요소를 추가할 때에도 이러한 " +"모든 가져오기를 그대로 유지할 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 msgid "" -"Built-in strategies return user-provided initial parameters. The following " -"example shows how initial parameters can be passed to :code:`FedAvg`:" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in :code:`class Net()`." msgstr "" -"기본 제공 전략은 사용자가 제공한 초기 매개 변수를 반환합니다. 다음 예는 초기 " -"매개 변수를 :code:`FedAvg`에 전달하는 방법을 보여줍니다:" +"이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10 데이터 세트를 사용합니다. 모델 아키텍처(매우 간단한 컨볼루션 신경망)는" +" :code:`class Net()`에 정의되어 있습니다." -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or :" -"code:`None`. If no parameters are returned from :code:" -"`initialize_parameters` (i.e., :code:`None`), the server will randomly " -"select one client and ask it to provide its parameters. This is a " -"convenience feature and not recommended in practice, but it can be useful " -"for prototyping. In practice, it is recommended to always use server-side " -"parameter initialization." -msgstr "" -"Flower 서버는 :code:`initialize_parameters`를 호출하여 :code:" -"`initial_parameters`에 전달된 파라미터를 반환하거나 :code:`None`을 반환합니" -"다. 
:code:`initial_parameters`에서 반환되는 매개변수가 없는 경우(즉, :code:" -"`None`) 서버는 무작위로 클라이언트 하나를 선택하여 해당 클라이언트에 매개변수" -"를 제공하도록 요청합니다. 이는 편의 기능이며 실제로는 권장하지 않지만 프로토" -"타이핑에는 유용할 수 있습니다. 실제로는 항상 서버 측 매개변수 초기화를 사용하" -"는 것이 좋습니다." +"The :code:`load_data()` function loads the CIFAR-10 training and test " +"sets. The :code:`transform` normalized the data after loading." +msgstr "" +":code:`load_data()` 함수는 CIFAR-10 훈련 및 테스트 세트를 로드합니다. :code:`transform`은 " +"로드 후 데이터를 정규화합니다." -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint. It " -"is also the fundamental capability needed to implement hybrid approaches, " -"for example, to fine-tune a pre-trained model using federated learning." +"We now need to define the training (function :code:`train()`) which loops" +" over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." msgstr "" -"서버 측 파라미터 초기화는 강력한 메커니즘입니다. 예를 들어 이전에 저장한 " -"체크포인트에서 학습을 재개하는 데 사용할 수 있습니다. 또한 연합 학습을 " -"사용하여 사전 학습된 모델을 미세 조정하는 등 하이브리드 접근 방식을 구현하는 " -"데 필요한 기본 기능입니다." - -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr ":code:`configure_fit` 메서드" +"이제 학습 집합을 반복하고, 손실을 측정하고, 이를 역전파한 다음 각 학습 예제 배치에 대해 하나의 최적화 단계를 수행하는 " +"학습(함수 :code:`train()`)을 정의해야 합니다." -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round of " -"training. What does *configure* mean in this context? Configuring a round " -"means selecting clients and deciding what instructions to send to these " -"clients. The signature of :code:`configure_fit` makes this clear:" +"The evaluation of the model is defined in the function :code:`test()`. " +"The function loops over all test samples and measures the loss of the " +"model based on the test dataset." msgstr "" -":code:`configure_fit`은 다가오는 학 라운드를 구성하는 역할을 합니다. 이 문맥" -"에서 *구성*은 무엇을 의미하나요? 라운드를 구성한다는 것은 클라이언트를 선택하" -"고 이 클라이언트에게 어떤 지침을 보낼지 결정하는 것을 의미합니다. code:" -"`configure_fit`의 시그니처를 보면 이를 명확히 알 수 있습니다:" +"모델 평가는 :code:`test()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 샘플을 반복하고 테스트 데이터 세트에 따라" +" 모델의 손실을 측정합니다." -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations usually " -"perform the following steps in :code:`configure_fit`:" -msgstr "" -"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 " -"나타냅니다. 전략 구현은 일반적으로 :code:`configure_fit`에서 다음 단계를 " -"수행합니다:" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." +msgstr "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 CIFAR-10에서 CNN을 훈련할 수 있습니다." 
-#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -":code:`client_manager`를 사용하여 사용 가능한 모든 클라이언트(또는 그 하위 집" -"합)를 무작위로 샘플링합니다(각각 :code:`ClientProxy` 개체로 표시됨)" +"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 구축한 것을 사용하여 하나의 " +"서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." msgstr "" -"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:" -"`config` dict를 보유한 동일한 :code:`FitIns`와 쌍을 이룹니다" +"이전 섹션에서 설명한 간단한 머신 러닝 프로젝트는 단일 데이터 세트(CIFAR-10)로 모델을 학습시키는데, 이를 중앙 집중식 " +"학습이라고 부릅니다. 이전 섹션에서 설명한 중앙 집중식 학습의 개념은 대부분 알고 계실 것이며, 많은 분들이 이전에 사용해 보셨을 " +"것입니다. 일반적으로 머신 러닝 워크로드를 연합 방식으로 실행하려면 대부분의 코드를 변경하고 모든 것을 처음부터 다시 설정해야 " +"합니다. 이는 상당한 노력이 필요할 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate in a " -"round if the corresponding :code:`ClientProxy` is included in the list " -"returned from :code:`configure_fit`." -msgstr "" -"보다 정교한 구현은 :code:`configure_fit`을 사용하여 사용자 지정 클라이언트 선" -"택 로직을 구현할 수 있습니다. 클라이언트는 :code:`configure_fit`에서 반환된 " -"목록에 해당 :code:`ClientProxy`가 포함된 경우에만 라운드에 참여합니다." +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." +msgstr "하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으로 발전시킬 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies to " -"train, for example, different models on different clients, or use different " -"hyperparameters on different clients (via the :code:`config` dict)." +"The concept is easy to understand. We have to start a *server* and then " +"use the code in :code:`cifar.py` for the *clients* that are connected to " +"the *server*. The *server* sends model parameters to the clients. 
The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" -"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. instructions은 " -"클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 전송할 수 " -"있습니다. 이를 통해 예를 들어 클라이언트마다 다른 모델을 학습시키거나 " -"클라이언트마다 다른 하이퍼파라미터를 사용하는 사용자 지정 전략을 사용할 수 " -"있습니다(:code:`config` dict를 통해)." - -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr ":code:`aggregate_fit` 메서드" +"개념은 이해하기 쉽습니다. *서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 :code:`cifar.py`의 코드를 " +"사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. *클라이언트*는 학습을 실행하고 파라미터를 업데이트합니다. " +"업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은 연합 학습 " +"프로세스의 한 라운드를 설명하며 여러 라운드에 걸쳐 이 과정을 반복합니다." -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 +#: ../../source/tutorial-quickstart-jax.rst:129 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned by " -"the clients that were selected and asked to train in :code:`configure_fit`." +"Our example consists of one *server* and two *clients*. Let's set up " +":code:`server.py` first. The *server* needs to import the Flower package " +":code:`flwr`. Next, we use the :code:`start_server` function to start a " +"server and tell it to perform three rounds of federated learning." msgstr "" -"code:`aggregate_fit`은 :code:`configure_fit`에서 훈련하도록 선택되고 요청된 " -"클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." +"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. 먼저 :code:`server.py`를 설정해 보겠습니다. " +"*server*는 Flower 패키지 :code:`flwr`를 가져와야 합니다. 다음으로, :code:`start_server` " +"함수를 사용하여 서버를 시작하고 세 차례의 연합 학습을 수행하도록 지시합니다." -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 +#: ../../source/tutorial-quickstart-jax.rst:139 +msgid "We can already start the *server*:" +msgstr "이미 *서버*를 시작할 수 있습니다:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via :code:" -"`configure_fit`). :code:`aggregate_fit` therefore receives a list of :code:" -"`results`, but also a list of :code:`failures`." +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined centralized training in :code:`cifar.py`. " +"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " +"update the parameters on our PyTorch model:" msgstr "" -"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과" -"를 얻을 수 있다는 보장은 없습니다(:code:`configure_fit`을 통해). 따라서 :" -"code:`aggregate_fit`은 :code:`results` 목록뿐만 아니라 :code:`failures` 목록" -"도 받습니다." +"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`cifar.py`에서 이전에 정의한 " +"중앙 집중식 학습을 기반으로 구축합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, PyTorch 모델의 파라미터를 " +"업데이트하기 위해 :code:`torch`도 가져와야 합니다:" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a " -"dictionary of aggregated metrics. 
The :code:`Parameters` return value is " -"optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." -msgstr "" -"code:`aggregate_fit`은 선택적 :code:`Parameters` 개체와 집계된 메트릭의 " -"dictionary를 반환합니다. :code:`Parameters` 반환 값은 :code:`aggregate_fit`" -"이 제공된 결과가 집계에 충분하지 않다고 판단할 수 있으므로(예: 실패 수가 너" -"무 많음) 선택 사항입니다." +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. " +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" +msgstr "" +"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " +":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 우리의 구현은 " +":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`CifarClient`라고 부를 " +"것입니다. :code:`NumPyClient`는 파이토치나 텐서플로우/Keras처럼 NumPy 상호운용성이 좋은 프레임워크를 " +"사용하는 경우 필요한 일부 보일러플레이트를 피하기 때문에 :code:`Client`보다 구현하기가 조금 더 쉽습니다. " +"code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 테스트를" +" 위한 메서드 1개 등 네 가지 메서드를 구현해야 합니다:" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr ":code:`configure_evaluate` 메서드" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +msgid ":code:`set_parameters`" +msgstr ":code:`set_parameters`" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +#: ../../source/tutorial-quickstart-jax.rst:166 msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming round " -"of evaluation. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_evaluate` makes this clear:" -msgstr "" -":code:`configure_evaluate`는 다가오는 평가 라운드를 구성하는 역할을 합니다. " -"이 문맥에서 *구성*은 무엇을 의미하나요? 라운드를 구성한다는 것은 클라이언트" -"를 선택하고 이러한 클라이언트에 전송할 지침을 결정하는 것을 의미합니다. :" -"code:`configure_evaluate`의 시그니처를 보면 이를 명확히 알 수 있습니다:" +"set the model parameters on the local model that are received from the " +"server" +msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 +#: ../../source/tutorial-quickstart-jax.rst:168 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations usually " -"perform the following steps in :code:`configure_evaluate`:" -msgstr "" -"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 " -"나타냅니다. 
전략 구현은 일반적으로 :code:`configure_evaluate`에서 다음 " -"단계를 수행합니다:" +"loop over the list of model parameters received as NumPy " +":code:`ndarray`'s (think list of neural network layers)" +msgstr "(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파라미터 목록에 대해 반복합니다" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#: ../../source/tutorial-quickstart-jax.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid ":code:`get_parameters`" +msgstr ":code:`get_parameters`" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 +#: ../../source/tutorial-quickstart-jax.rst:170 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"get the model parameters and return them as a list of NumPy " +":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" msgstr "" -"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:" -"`config` dict를 보유한 동일한 :code:`EvaluateIns`와 쌍을 이룹니다" +"모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 " +":code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" -#: ../../source/how-to-implement-strategies.rst:283 -msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate in a " -"round if the corresponding :code:`ClientProxy` is included in the list " -"returned from :code:`configure_evaluate`." -msgstr "" -"보다 정교한 구현은 :code:`configure_evaluate`를 사용하여 사용자 지정 클라이언" -"트 선택 로직을 구현할 수 있습니다. 클라이언트는 :code:`configure_evaluate`에" -"서 반환된 목록에 해당 :code:`ClientProxy`가 포함된 경우에만 라운드에 참여합니" -"다." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid ":code:`fit`" +msgstr ":code:`fit`" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:172 +#: ../../source/tutorial-quickstart-jax.rst:176 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies to " -"evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` dict)." -msgstr "" -"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. 명령어는 " -"클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 전송할 수 " -"있습니다. 이를 통해 사용자 지정 전략을 통해 예를 들어 클라이언트마다 다른 " -"모델을 평가하거나 클라이언트마다 다른 하이퍼파라미터를 사용할 수 " -"있습니다(:code:`config` dict를 통해)." 
+"update the parameters of the local model with the parameters received " +"from the server" +msgstr "서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr ":code:`aggregate_evaluate` 메서드" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +msgid "train the model on the local training set" +msgstr "로컬 훈련 세트에서 모델을 훈련합니다" -#: ../../source/how-to-implement-strategies.rst:293 -msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in :code:" -"`configure_evaluate`." -msgstr "" -"code:`aggregate_evaluate`는 :code:`configure_evaluate`에서 선택되어 평가를 요" -"청한 클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +msgid "get the updated local model weights and return them to the server" +msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" -#: ../../source/how-to-implement-strategies.rst:306 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via :code:" -"`configure_evaluate`). :code:`aggregate_evaluate` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." -msgstr "" -"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과" -"를 얻을 수 있다는 보장은 없습니다(:code:`configure_evaluate`를 통해). 따라" -"서 :code:`aggregate_evaluate`는 :code:`results` 목록뿐만 아니라 :code:" -"`failures` 목록도 받습니다." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid ":code:`evaluate`" +msgstr ":code:`evaluate`" -#: ../../source/how-to-implement-strategies.rst:308 -msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a " -"dictionary of aggregated metrics. The :code:`float` return value is optional " -"because :code:`aggregate_evaluate` might decide that the results provided " -"are not sufficient for aggregation (e.g., too many failures)." -msgstr "" -"code:`aggregate_evaluate`는 선택적 :code:`float`(손실)와 집계된 메트릭의 " -"dictionary를 반환합니다. code:`float` 반환 값은 :code:`aggregate_evaluate`가 " -"제공된 결과가 집계에 충분하지 않다고 판단할 수 있으므로(예: 실패 수가 너무 많" -"음) 선택 사항입니다." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 +#: ../../source/tutorial-quickstart-jax.rst:177 +msgid "evaluate the updated model on the local test set" +msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr ":code:`evaluate` 메서드" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +msgid "return the local loss and accuracy to the server" +msgstr "로컬 손실 및 정확도를 서버에 반환합니다" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to :code:" -"`configure_evaluate`/:code:`aggregate_evaluate` enables strategies to " -"perform both servers-side and client-side (federated) evaluation." +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`test()` previously " +"defined in :code:`cifar.py`. 
So what we really do here is we tell Flower " +"through our :code:`NumPyClient` subclass which of our already defined " +"functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." msgstr "" -":code:`evaluate`는 서버 측에서 모델 매개변수를 평가하는 역할을 담당합니다. " -"code:`configure_evaluate`/:code:`aggregate_evaluate`와 함께 :code:`evaluate`" -"를 사용하면 서버 측과 클라이언트 측(federated) 평가를 모두 수행할 수 있는 전" -"략을 사용할 수 있습니다." +"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " +":code:`cifar.py`에 정의된 함수인 :code:`train()`과 :code:`test()`를 활용합니다. 따라서 여기서" +" 실제로 하는 일은 :code:`NumPyClient` 서브클래스를 통해 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " +"Flower에 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 type annotations을 포함했습니다." -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 msgid "" -"The return value is again optional because the strategy might not need to " -"implement server-side evaluation or because the user-defined :code:" -"`evaluate` method might not complete successfully (e.g., it might fail to " -"load the server-side evaluation data)." +"All that's left to do it to define a function that loads both model and " +"data, creates a :code:`CifarClient`, and starts this client. You load " +"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " +"with the function :code:`fl.client.start_client()` by pointing it at the " +"same IP address we used in :code:`server.py`:" msgstr "" -"반환 값은 전략에서 서버 측 평가를 구현할 필요가 없거나 사용자 정의 :code:" -"`evaluate` 메서드가 성공적으로 완료되지 않을 수 있기 때문에(예: 서버 측 평가 " -"데이터를 로드하지 못할 수 있음) 다시 선택 사항으로 설정할 수 있습니다." - -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" -msgstr "Flower 설치" - -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" -msgstr "Python 버전" - -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "안정적인 릴리즈 설치" +"이제 모델과 데이터를 모두 로드하는 함수를 정의하고, :code:`CifarClient`를 생성하고, 이 클라이언트를 시작하는 " +"작업만 남았습니다. 코드:`cifar.py`를 사용하여 데이터와 모델을 로드합니다. :code:`server.py`에서 사용한 것과" +" 동일한 IP 주소를 지정하여 :code:`fl.client.start_client()` 함수로 " +":code:`CifarClient`를 시작합니다:" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" -msgstr "pip 사용" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 +#: ../../source/tutorial-quickstart-jax.rst:274 +msgid "And that's it. You can now open two additional terminal windows and run" +msgstr "여기까지입니다. 이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 msgid "" -"Stable releases are available on `PyPI `_::" +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" -"안정적인 릴리즈는 `PyPI `_:: 에서 확인할 수 " -"있습니다::" +"를 입력하고(그 전에 서버가 실행 중인지 확인하세요) (이전에는 중앙 집중식) PyTorch 프로젝트가 두 클라이언트에서 연합 " +"학습을 실행하는 것을 확인합니다. 축하합니다!" 
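Putting the pieces described above together, a minimal sketch of :code:`client.py` could look as follows. It assumes that :code:`cifar.py` exposes the centralized helpers :code:`Net`, :code:`load_data`, :code:`train` and :code:`test` mentioned earlier (the exact names and signatures are assumptions, not copied verbatim from the example) and it uses the Flower 1.x :code:`NumPyClient` API::

    # client.py - illustrative sketch; the cifar.py helper names are assumptions
    from collections import OrderedDict

    import torch
    import flwr as fl

    import cifar  # assumed module providing Net, load_data, train, test


    class CifarClient(fl.client.NumPyClient):
        def __init__(self):
            self.model = cifar.Net()
            self.trainloader, self.testloader = cifar.load_data()

        def get_parameters(self, config):
            # Return the local model weights as a list of NumPy ndarrays
            return [val.cpu().numpy() for _, val in self.model.state_dict().items()]

        def set_parameters(self, parameters):
            # Load the weights received from the server into the local model
            params_dict = zip(self.model.state_dict().keys(), parameters)
            state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
            self.model.load_state_dict(state_dict, strict=True)

        def fit(self, parameters, config):
            self.set_parameters(parameters)
            cifar.train(self.model, self.trainloader, epochs=1)  # assumed signature
            return self.get_parameters(config={}), len(self.trainloader.dataset), {}

        def evaluate(self, parameters, config):
            self.set_parameters(parameters)
            loss, accuracy = cifar.test(self.model, self.testloader)  # assumed signature
            return float(loss), len(self.testloader.dataset), {"accuracy": float(accuracy)}


    if __name__ == "__main__":
        # On older Flower 1.x releases, fl.client.start_numpy_client(...) with
        # client=CifarClient() can be used instead of .to_client().
        fl.client.start_client(
            server_address="127.0.0.1:8080",
            client=CifarClient().to_client(),
        )

On the server side, :code:`server.py` is essentially a single call such as :code:`fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3))`, which matches the three rounds of federated learning mentioned above.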
-#: ../../source/how-to-install-flower.rst:21 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" -msgstr "" -"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr``을 " -"``simulation``extra와 함께 설치해야 합니다:" - -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" -msgstr "conda(또는 mamba) 사용" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" +msgstr "" +"이 예제의 전체 소스 코드: `파이토치: 중앙 Centralized에서 Federated으로 (코드) " +"`_. 물론 이 예제는 두 클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 " +"다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서" +" 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것은 어떨까요?" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." -msgstr "Flower은 'conda-forge' 채널에서도 설치할 수 있습니다." +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "차등 프라이버시" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first need " -"to run the following::" -msgstr "채널에 'conda-forge'를 추가하지 않은 경우 먼저 다음을 실행해야 합니다:" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." +msgstr "" +"의료, 금융 거래, 사용자 선호도 등과 같은 데이터 세트의 정보는 가치 있고 과학적 혁신의 잠재력을 지니고 있으며 중요한 비즈니스 " +"인사이트를 제공합니다. 그러나 이러한 데이터는 또한 민감한 정보이며 개인의 프라이버시를 침해할 위험이 있습니다." -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed " -"with ``conda``::" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -"conda-forge`` 채널이 활성화되면 ``flwr``을 ``conda``로 설치할 수 있습니다::" +"익명화와 같은 기존 방법만으로는 재식별 및 데이터 연결과 같은 공격으로 인해 효과가 없습니다. 그래서 차등 프라이버시가 " +"등장했습니다. 차등 프라이버시는 개인의 개인 정보 보호를 보장하면서 데이터를 분석할 수 있는 가능성을 제공합니다." -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" -msgstr "또는 ``mamba``::" +#: ../../source/explanation-differential-privacy.rst:12 +msgid "" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." 
+msgstr "" +"하나의 레코드(예: 앨리스의 데이터)를 제외하고는 동일한 두 개의 데이터 세트가 있다고 상상해 보세요. 차등 프라이버시(DP)는 " +"평균 소득 계산과 같은 모든 분석(M)이 두 데이터 세트에 대해 거의 동일한 결과를 산출하도록 보장합니다(O와 O' 는 비슷할 " +"것입니다). 이렇게 하면 그룹 패턴은 보존하면서 개별 세부 정보는 가려져 개인의 정보가 군중 속에 숨겨집니다." -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "설치 확인" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" +msgstr "DP 소개" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/explanation-differential-privacy.rst:22 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to " -"the command line::" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -"다음 명령을 사용하여 Flower가 성공적으로 설치되었는지 확인할 수 있습니다. 모" -"든 것이 정상적으로 작동하면 명령줄에 Flower의 버전이 출력됩니다:" +"DP를 달성하기 위해 가장 일반적으로 사용되는 메커니즘 중 하나는 분석의 전반적인 정확도를 유지하면서 데이터에서 각 개인의 기여도를" +" 가릴 수 있도록 분석 결과에 충분한 노이즈를 추가하는 것입니다." -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "고급 설치 옵션" +#: ../../source/explanation-differential-privacy.rst:25 +msgid "Formal Definition" +msgstr "공식 정의" -#: ../../source/how-to-install-flower.rst:58 -msgid "Install via Docker" -msgstr "Docker를 통해 설치" +#: ../../source/explanation-differential-privacy.rst:26 +msgid "" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" +msgstr "" +"차등 프라이버시(DP)는 공격자가 무작위 알고리즘의 출력을 통해 유추할 수 있는 정보에 대해 통계적 보장을 제공합니다. 이는 " +"노이즈를 추가하여 알고리즘의 출력에 대한 한 개인의 영향력에 대한 무조건적인 상한선을 제공합니다[1]. 무작위 메커니즘 M은 하나의" +" 레코드만 다른 두 개의 인접 데이터베이스인 D:sub:`1`과 D:sub:`2`의 경우, 가능한 모든 출력 S ⊆ " +"Range(A)에 대해 (:math:`\\epsilon`, :math:`\\delta`)-차등 프라이버시를 제공합니다:" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/explanation-differential-privacy.rst:32 +#, fuzzy +msgid "" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -":doc:`Docker를 사용하여 Flower를 실행하는 방법 `" - -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "사전 릴리즈 설치" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/explanation-differential-privacy.rst:38 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as pre-" -"release versions (alpha, beta, release candidate) before the stable release " -"happens::" -msgstr "" -"새(불안정할 수 있는) 버전의 Flower는 안정 버전이 출시되기 전에 사전 릴리즈 버" -"전(알파, 베타, 릴리즈 후보)으로 제공되는 경우가 있습니다:" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. 
The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." +msgstr "" +"프라이버시 예산이라고도 하는 :math:`\\epsilon` 매개변수는 프라이버시 손실을 측정하는 지표입니다. 이 매개변수는 " +"프라이버시와 효용의 균형을 제어하며, :math:`\\epsilon` 값이 낮을수록 프라이버시 수준이 높지만 효용도 감소할 가능성이" +" 높습니다. math:`\\delta` 매개변수는 상한값인 :math:`\\epsilon`이 적용되지 않는 작은 확률을 설명합니다." +" 차등 프라이버시를 달성하는 데 필요한 노이즈의 양은 출력의 감도에 비례하며, 이는 단일 레코드의 포함 또는 제거로 인한 출력의 " +"최대 변화를 측정합니다." + +#: ../../source/explanation-differential-privacy.rst:45 +msgid "Differential Privacy in Machine Learning" +msgstr "머신 러닝의 차등 프라이버시" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/explanation-differential-privacy.rst:46 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases " -"should be installed with the ``simulation`` extra::" -msgstr "" -"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr`` 사전 릴리즈를 " -"``simulation`` extra와 함께 설치해야 합니다:" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." +msgstr "" +"머신 러닝에서 DP를 활용하여 학습 데이터의 개인정보를 보호할 수 있습니다. 차등 비공개 머신 러닝 알고리즘은 알고리즘이 개별 " +"데이터 포인트에 대한 특정 정보를 학습하지 못하도록 하여 모델이 민감한 정보를 노출하지 않도록 하는 방식으로 설계되었습니다. " +"노이즈가 도입되는 단계에 따라 머신 러닝 알고리즘에 DP를 적용하는 다양한 방법이 존재합니다. 한 가지 방법은 학습 데이터(특징 " +"또는 레이블)에 노이즈를 추가하는 것이고, 다른 방법은 모델 학습 중에 손실 함수의 기울기에 노이즈를 주입하는 것입니다. 또한 " +"이러한 노이즈를 모델의 출력에 통합할 수도 있습니다." -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "야간 릴리즈 설치" +#: ../../source/explanation-differential-privacy.rst:53 +msgid "Differential Privacy in Federated Learning" +msgstr "연합 학습의 차등 프라이버시" -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"The latest (potentially unstable) changes in Flower are available as nightly " -"releases::" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -"Flower의 최신 (불안정할 수 있는) 변경 사항은 다음과 같이 야간 릴리즈로 제공됩" -"니다:" +"연합 학습은 여러 당사자가 원시 데이터를 공유하지 않고도 공동으로 모델을 학습할 수 있는 데이터 최소화 접근 방식입니다. 그러나 " +"연합 학습은 새로운 개인정보 보호 문제를 야기하기도 합니다. 당사자와 중앙 서버 간의 모델 업데이트는 로컬 데이터에 대한 정보를 " +"유출할 수 있습니다. 이러한 유출은 멤버십 추론 및 속성 추론 공격이나 모델 반전 공격과 같은 공격에 악용될 수 있습니다." 
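As a toy illustration of the (:math:`\epsilon`, :math:`\delta`) definition above, the sketch below releases a differentially private average by clipping each record and adding Gaussian noise calibrated to the sensitivity with the standard scale :math:`\sigma = \Delta \sqrt{2\log(1.25/\delta)} / \epsilon` (the same expression that appears for local DP later in this guide). It is meant to convey the idea only, not to serve as production DP code::

    # Toy Gaussian mechanism: a differentially private mean of bounded values.
    import numpy as np

    def dp_mean(values, epsilon, delta, clip_min=0.0, clip_max=1.0):
        clipped = np.clip(values, clip_min, clip_max)
        n = len(clipped)
        # Replacing one record changes the mean by at most (clip_max - clip_min) / n
        sensitivity = (clip_max - clip_min) / n
        sigma = sensitivity * np.sqrt(2 * np.log(1.25 / delta)) / epsilon
        return float(np.mean(clipped) + np.random.normal(0.0, sigma))

    incomes = np.random.rand(10_000)  # stand-in data scaled to [0, 1]
    print(dp_mean(incomes, epsilon=1.0, delta=1e-5))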
-#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` should " -"be installed with the ``simulation`` extra::" -msgstr "" -"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우, ``flwr-nightly``를 " -"``simulation`` extr와 함께 설치해야 합니다::" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." +msgstr "DP는 연합 학습에서 클라이언트의 데이터에 대한 개인 정보 보호를 제공하는 데 중요한 역할을 할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "모니터 시뮬레이션" +#: ../../source/explanation-differential-privacy.rst:60 +msgid "" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." +msgstr "" +"개인 정보 제공의 세분성 또는 노이즈 추가 위치에 따라 연합 학습에는 다양한 형태의 DP가 존재합니다. 이 설명에서는 노이즈가 " +"추가되는 위치에 따라 서버(중앙이라고도 함) 또는 클라이언트(로컬이라고도 함)에서의 연합 학습에서 DP를 활용하는 두 가지 접근 " +"방식에 중점을 둡니다." -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-differential-privacy.rst:63 msgid "" -"Flower allows you to monitor system resources while running your simulation. " -"Moreover, the Flower simulation engine is powerful and enables you to decide " -"how to allocate resources per client manner and constrain the total usage. " -"Insights from resource consumption can help you make smarter decisions and " -"speed up the execution time." +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -"Flower를 사용하면 시뮬레이션을 실행하는 동안 시스템 리소스를 모니터링할 수 있" -"습니다. 또한 Flower 시뮬레이션 엔진은 강력하며 클라이언트별 리소스 할당 방법" -"을 결정하고 총 사용량을 제한할 수 있습니다. 리소스 소비에 대한 인사이트를 통" -"해 더 현명한 결정을 내리고 실행 시간을 단축할 수 있습니다." +"**중앙 차등 프라이버시**: DP는 서버에서 적용되며 집계된 모델이 각 클라이언트의 데이터에 대한 정보를 유출하는 것을 방지하는 " +"것이 목표입니다." -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/explanation-differential-privacy.rst:65 msgid "" -"The specific instructions assume you are using macOS and have the `Homebrew " -"`_ package manager installed." +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -"구체적인 지침은 macOS를 사용 중이고 'Homebrew `_ 패키지 관" -"리자가 설치되어 있다고 가정합니다." +"**로컬 차등 프라이버시**: DP는 정보를 서버로 보내기 전에 클라이언트 측에서 적용되며, 서버로 전송되는 업데이트가 클라이언트 " +"데이터에 대한 정보를 유출하는 것을 방지하는 것이 목표입니다." -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "다운로드" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 +msgid "Central Differential Privacy" +msgstr "중앙 차등 프라이버시" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"`Prometheus `_ is used for data collection, while " -"`Grafana `_ will enable you to visualize the collected " -"data. They are both well integrated with `Ray `_ which " -"Flower uses under the hood." 
+"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -"`Prometheus `_는 데이터 수집에 사용되며, `Grafana " -"`_는 수집된 데이터를 시각화할 수 있게 해줍니다. 이 두 " -"도구는 모두 Flower가 내부적으로 사용하는 `Ray `_와 잘 통" -"합되어 있습니다." +"사용자 수준 DP라고도 하는 이 접근 방식에서는 중앙 서버가 전역적으로 집계된 매개변수에 노이즈를 추가하는 역할을 담당합니다. " +"서버에 대한 신뢰가 필요하다는 점에 유의해야 합니다." -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/explanation-differential-privacy.rst:76 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." -msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." +msgstr "" +"연합 학습에서 중앙 DP를 구현하는 방법은 여러 가지가 있지만, 여기서는 [2]와 [3]에서 제안한 알고리즘에 집중합니다. 전반적인" +" 접근 방식은 클라이언트가 전송한 모델 업데이트를 잘라내고 집계된 모델에 약간의 노이즈를 추가하는 것입니다. 각 반복에서 특정 " +"확률로 훈련할 무작위 클라이언트 세트가 선택됩니다. 각 클라이언트는 자체 데이터에 대해 로컬 학습을 수행합니다. 그런 다음 각 " +"클라이언트의 업데이트는 특정 값 `S`(민감도 `S`)에 의해 잘립니다. 이렇게 하면 개별 클라이언트의 영향을 제한할 수 있어 " +"개인정보 보호에 중요하고 견고성에 도움이 되는 경우가 많습니다. 이를 달성하기 위한 일반적인 접근 방식은 클라이언트 모델 업데이트의" +" `L2` 규범을 제한하여 더 큰 업데이트가 규범 `S`에 맞도록 축소되도록 하는 것입니다." -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "M1 Mac을 사용 중이라면:" - -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "이전 세대 Intel Mac 장치에서는:" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" +msgstr "클리핑" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"Open the respective configuration files and change them. Depending on your " -"device, use one of the two following commands:" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -"각 구성 파일을 열고 변경합니다. 장치에 따라 다음 두 명령 중 하나를 사용합니" -"다:" +"그 후 가우시안 메커니즘을 사용하여 모든 클라이언트의 업데이트 합계를 왜곡하기 위해 노이즈를 추가합니다. 노이즈의 양은 감도 값에 " +"따라 조정되어 프라이버시 보장을 얻습니다. 가우시안 메커니즘은 `N (0, σ²)`에서 샘플링된 노이즈와 함께 사용됩니다. 여기서 " +"`σ = (noise_scale * S) / (샘플링된 클라이언트 수)`입니다." -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" +msgstr "클리핑" + +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"and then delete all the text in the file and paste a new Prometheus config " -"you see below. 
You may adjust the time intervals to your requirements:" -msgstr "" -"를 입력한 다음 파일의 모든 텍스트를 삭제하고 아래에 표시된 새 Prometheus 설정" -"을 붙여넣습니다. 요구 사항에 따라 시간 간격을 조정할 수 있습니다:" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." +msgstr "중앙 DP에서 일반적으로 사용되는 클리핑에는 고정 클리핑과 조정 클리핑의 두 가지 형태가 있습니다." -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-differential-privacy.rst:98 msgid "" -"Now after you have edited the Prometheus configuration, do the same with the " -"Grafana configuration files. Open those using one of the following commands " -"as before:" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -"이제 Prometheus 구성을 편집한 후 Grafana 구성 파일에 대해서도 동일한 작업을 " -"수행합니다. 이전과 마찬가지로 다음 명령 중 하나를 사용하여 파일을 엽니다:" +"**고정 클리핑** : 클라이언트의 업데이트 크기에 대해 미리 정의된 고정 임계값이 설정됩니다. 이 임계값을 초과하는 모든 " +"업데이트는 임계값으로 다시 클리핑됩니다." -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-differential-privacy.rst:100 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." +"**조정 클리핑** : 클리핑 임계값은 관찰된 업데이트 분포에 따라 동적으로 조정됩니다[4]. 즉, 클리핑 값은 업데이트 표준 " +"분포의 사분위수에 따라 라운드가 진행되는 동안 조정됩니다." -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-differential-privacy.rst:102 msgid "" -"Congratulations, you just downloaded all the necessary software needed for " -"metrics tracking. Now, let’s start it." -msgstr "" -"축하합니다. 매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이" -"제 시작해 보겠습니다." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." +msgstr "고정 클리핑과 조정 클리핑 중 선택은 개인정보 보호 요구 사항, 데이터 배포, 모델 복잡성 등 다양한 요인에 따라 달라집니다." -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "매트릭 트래킹" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 +msgid "Local Differential Privacy" +msgstr "로컬 차등 프라이버시" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:107 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해" -"야 합니다." - -#: ../../source/how-to-monitor-simulation.rst:97 -msgid "" -"Please include the following argument in your Python code when starting a " -"simulation." -msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." +"이 접근 방식에서는 각 클라이언트가 DP를 수행할 책임이 있습니다. 로컬 DP는 완전히 신뢰할 수 있는 애그리게이터가 필요하지 " +"않지만, 로컬 DP는 중앙 DP에 비해 정확도는 떨어져도 개인 정보 보호는 더 우수하다는 점에 유의해야 합니다." -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." 
-msgstr "이제 워크로드를 시작할 준비가 되었습니다." +#: ../../source/explanation-differential-privacy.rst:116 +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "이 설명에서는 로컬 DP를 달성하는 두 가지 형태에 중점을 둡니다:" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-differential-privacy.rst:118 msgid "" -"Shortly after the simulation starts, you should see the following logs in " -"your terminal:" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -"시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" +"각 클라이언트는 로컬 업데이트를 서버로 보내기 전에 로컬 업데이트에 노이즈를 추가합니다. 로컬 모델의 감도를 ∆로 간주하여 가우시안" +" 노이즈가 σ의 노이즈 스케일로 적용되어 (:math:`\\epsilon`, :math:`\\delta`)-DP를 달성하기 위해, " +"여기서 σ는 노이즈 스케일입니다:" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "``_ 에서 모든 것을 볼 수 있습니다." - -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the " -"lowest option)." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -"Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니" -"다." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/explanation-differential-privacy.rst:125 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the right-" -"up corner, “View in Grafana”. Please note that the Ray dashboard is only " -"accessible during the simulation. After the simulation ends, you can only " -"use Grafana to explore the metrics. You can start Grafana by going to " -"``http://localhost:3000/``." +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -"또는 오른쪽 위 모서리인 \"Grafana에서 보기\"를 클릭하여 Grafana에서 바로 확인" -"할 수도 있습니다. Ray 대시보드는 시뮬레이션 중에만 액세스할 수 있다는 점에 유" -"의하세요. 시뮬레이션이 종료된 후에는 Grafana를 사용하여 메트릭을 탐색할 수만 " -"있습니다. ``http://localhost:3000/``로 이동하여 Grafana를 시작할 수 있습니다." +"각 클라이언트는 로컬 트레이닝(DP-SGD) 중에 모델의 gradient에 노이즈를 추가합니다. 보다 구체적으로, 이 접근 " +"방식에서는 gradient이 클리핑되고 보정된 노이즈가 gradient에 주입됩니다." -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." -msgstr "" -"시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다. 그렇지 않으면 실행 " -"중인 동안 컴퓨터에서 포트 :code:`3000` 등을 차단하므로 이 작업이 중요합니다." +"Please note that these two approaches are providing privacy at different " +"levels." +msgstr "이 두 가지 접근 방식은 서로 다른 수준의 개인정보 보호 기능을 제공한다는 점에 유의하세요." 
-#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "리소스 할당" +#: ../../source/explanation-differential-privacy.rst:131 +msgid "**References:**" +msgstr "**참고:**" -#: ../../source/how-to-monitor-simulation.rst:134 -msgid "" -"You must understand how the Ray library works to efficiently allocate system " -"resources to simulation clients on your own." -msgstr "" -"Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리" -"소스를 효율적으로 할당할 수 있습니다." +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +msgstr "[1] Dwork 외. 차등 프라이버시의 알고리즘적 기초." -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-differential-privacy.rst:135 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of them, " -"nor that the model training happens at all of them simultaneously. You will " -"learn more about that in the later part of this blog. You can check the " -"system resources by running the following:" -msgstr "" -"처음에 시뮬레이션(Ray가 내부에서 처리하는)은 기본적으로 시스템에서 사용 가능" -"한 모든 리소스를 사용하여 시작되며, 이 리소스는 클라이언트 간에 공유됩니다. " -"그렇다고 해서 모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 동" -"시에 모델 학습이 이루어지는 것은 아닙니다. 이에 대한 자세한 내용은 이 블로그" -"의 뒷부분에서 설명합니다. 다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" - -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." +msgstr "[2] McMahan 외. 차등적 개인 반복 언어 모델 학습." -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do the " -"following (you don't need to overwrite all of them):" -msgstr "" -"그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다" -"(모두 덮어쓸 필요는 없음):" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "[3] Geyer 외. 차등적 개인 연합 학습: 고객 수준의 관점." -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." +#: ../../source/explanation-differential-privacy.rst:139 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +msgstr "[4] Galen 외. 조정형 클리핑을 통한 차등적 개인 학습." -#: ../../source/how-to-monitor-simulation.rst:205 -msgid "" -"Now comes the crucial part. Ray will start a new client only when it has all " -"the required resources (such that they run in parallel) when the resources " -"allow." -msgstr "" -"이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는 경우에만 필요한 모든 리" -"소스가 있을 때(병렬로 실행되는 등) 새 클라이언트를 시작합니다." +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "연합 평가" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"In the example above, only one client will be run, so your clients won't run " -"concurrently. Setting :code:`client_num_gpus = 0.5` would allow running two " -"clients and therefore enable them to run concurrently. Be careful not to " -"require more resources than available. 
If you specified :code:" -"`client_num_gpus = 2`, the simulation wouldn't start (even if you had 2 GPUs " -"but decided to set 1 in :code:`ray_init_args`)." +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" -"위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 동시에 실행되지 않" -"습니다. :code:`client_num_gpus = 0.5` 를 설정하면 두 개의 클라이언트를 실행" -"할 수 있으므로 동시에 실행할 수 있습니다. 사용 가능한 리소스보다 더 많은 리소" -"스를 요구하지 않도록 주의하세요. :code:`client_num_gpus = 2`를 지정하면 시뮬" -"레이션이 시작되지 않습니다(GPU가 2개이지만 :code:`ray_init_args`에서 1개를 설" -"정한 경우에도 마찬가지입니다)." - -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "자주 묻는 질문" +"연합 학습 시스템에서 모델을 평가하는 데는 중앙 집중식(또는 서버 측) 평가와 연합(또는 클라이언트 측) 평가라는 두 가지 주요 " +"접근 방식이 있습니다." -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "질문: 기록된 메트릭이 보이지 않습니다." +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "중앙 집중식 평가" -#: ../../source/how-to-monitor-simulation.rst:216 -msgid "" -"A: The timeframe might not be properly set. The setting is in the top right " -"corner (\"Last 30 minutes\" by default). Please change the timeframe to " -"reflect the period when the simulation was running." -msgstr "" -"A: 기간이 제대로 설정되지 않았을 수 있습니다. 설정은 오른쪽 상단에 있습니다" -"(기본값은 '지난 30분'). 시뮬레이션이 실행된 기간을 반영하도록 기간을 변경해 " -"주세요." +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "기본 제공 전략" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana server " -"is running and refresh this page” after going to the Metrics tab in Ray " -"Dashboard." +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -"질문: \"Grafana 서버가 감지되지 않았습니다. Ray 대시보드의 메트릭 탭으로 이동" -"한 후 Grafana 서버가 실행 중인지 확인하고 이 페이지를 새로고침하세요.\"라는 " -"메시지가 표시됩니다." +"모든 기본 제공 전략은 초기화 중에 평가 함수를 제공하여 중앙 집중식 평가를 지원합니다. 평가 함수는 현재 글로벌 모델 파라미터를 " +"입력으로 받아 평가 결과를 반환할 수 있는 모든 함수입니다:" -#: ../../source/how-to-monitor-simulation.rst:220 -msgid "" -"A: You probably don't have Grafana running. Please check the running services" -msgstr "" -"A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요" +#: ../../source/explanation-federated-evaluation.rst:58 +msgid "Custom Strategies" +msgstr "사용자 정의 전략" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/explanation-federated-evaluation.rst:60 msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." +"The :code:`Strategy` abstraction provides a method called " +":code:`evaluate` that can directly be used to evaluate the current global" +" model parameters. The current server implementation calls " +":code:`evaluate` after parameter aggregation and before federated " +"evaluation (see next paragraph)." msgstr "" -"Q: ``_로 이동할 때 \"이 사이트에 연결할 수 없습니다." -"\"라는 메시지가 표시됩니다." +"코드:`전략` 추상화는 현재 전역 모델 파라미터를 평가하는 데 직접 사용할 수 있는 :코드:`평가`라는 메서드를 제공합니다. 현재 " +"서버 구현에서는 매개변수 집계 후와 연합 평가 전에 :code:`evaluate`를 호출합니다(다음 단락 참조)." -#: ../../source/how-to-monitor-simulation.rst:228 -msgid "" -"A: Either the simulation has already finished, or you still need to start " -"Prometheus." 
-msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." +#: ../../source/explanation-federated-evaluation.rst:65 +msgid "Federated Evaluation" +msgstr "연합 평가" -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "리소스" +#: ../../source/explanation-federated-evaluation.rst:68 +msgid "Implementing Federated Evaluation" +msgstr "연합 평가 구현" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "" -"Ray Dashboard: ``_" -msgstr "" -"Ray 대시보드: ``_" - -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" -msgstr "Ray 메트릭: ``_" +"Client-side evaluation happens in the :code:`Client.evaluate` method and " +"can be configured from the server side." +msgstr "클라이언트 측 평가는 :code:`Client.evaluate` 메서드에서 이루어지며 서버 측에서 구성할 수 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" -msgstr "Docker를 사용하여 Flower 실행" +#: ../../source/explanation-federated-evaluation.rst:101 +msgid "Configuring Federated Evaluation" +msgstr "연합 평가 구성" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-federated-evaluation.rst:103 msgid "" -"The simplest way to get started with Flower is by using the pre-made Docker " -"images, which you can find on `Docker Hub `__. Supported architectures include ``amd64`` and ``arm64v8``." -msgstr "" -"Flower를 시작하는 가장 간단한 방법은 `Docker Hub `__에서 찾을 수 있는 미리 만들어진 Docker 이미지를 사용하는 것입니다. 지" -"원되는 아키텍처는 ``amd64`` 및 ``arm64v8``입니다." - -#: ../../source/how-to-run-flower-using-docker.rst:8 -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "연합 평가는 서버 측에서 구성할 수 있습니다. 기본 제공 전략은 다음 인수를 지원합니다:" -#: ../../source/how-to-run-flower-using-docker.rst:15 +#: ../../source/explanation-federated-evaluation.rst:105 msgid "" -"If you do not see the version of Docker but instead get an error saying that " -"the command was not found, you will need to install Docker first. You can " -"find installation instruction `here `_." +":code:`fraction_evaluate`: a :code:`float` defining the fraction of " +"clients that will be selected for evaluation. If " +":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " +"are connected to the server, then :code:`10` will be randomly selected " +"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " +"federated evaluation will be disabled." msgstr "" -"전이 표시되지 않고 대신 명령을 찾을 수 없다는 오류가 표시되는 경우 먼저 " -"Docker를 설치해야 합니다. `여기 `_에서 " -"설치 지침을 찾을 수 있습니다." +":code:`fraction_evaluate`: 평가를 위해 선택될 클라이언트의 비율을 정의하는 :code:`float`입니다. " +"코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:`100` 클라이언트가 서버에 연결되어 " +"있는 경우 :code:`10`이 평가를 위해 무작위로 선택됩니다. code:`fraction_evaluate`가 " +":code:`0.0`으로 설정된 경우 연합 평가가 비활성화됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:21 +#: ../../source/explanation-federated-evaluation.rst:106 msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to avoid " -"using ``sudo``, you can follow the `Post-installation steps `_ on the official Docker " -"website." +":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " +"clients to be selected for evaluation. 
If :code:`fraction_evaluate` is " +"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " +":code:`100` clients are connected to the server, then :code:`20` clients " +"will be selected for evaluation." msgstr "" -"Linux에서 Docker 명령을 실행하려면 ``sudo`` 권한이 필요합니다. ``sudo`` 를 " -"사용하지 않으려면 공식 Docker 웹사이트의 `Post-installation steps " -"`_를 따르세요." +":code:`min_evaluate_clients`: 평가를 위해 선택할 최소 클라이언트 수. :code:`int`. " +"코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:`fraction_evaluate`가 " +"20으로 설정되어 있으며 :code:`100` 클라이언트가 서버에 연결되어 있는 경우 :code:`20` 클라이언트가 평가를 위해 " +"선택됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:27 +#: ../../source/explanation-federated-evaluation.rst:107 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode " -"and ServerApp image must have the same version when running together. This " -"guarantees seamless integration and avoids potential conflicts or issues " -"that may arise from using different versions." +":code:`min_available_clients`: an :code:`int` that defines the minimum " +"number of clients which need to be connected to the server before a round" +" of federated evaluation can start. If fewer than " +":code:`min_available_clients` are connected to the server, the server " +"will wait until more clients are connected before it continues to sample " +"clients for evaluation." msgstr "" -"최적의 성능과 호환성을 보장하려면 SuperLink, SuperNode 및 ServerApp 이미지를 " -"함께 실행할 때 버전이 동일해야 합니다. 이렇게 하면 원활한 통합을 보장하고 서" -"로 다른 버전을 사용할 때 발생할 수 있는 잠재적인 충돌이나 문제를 방지할 수 있" -"습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:32 -msgid "Flower SuperLink" -msgstr "Flower SuperLink" - -#: ../../source/how-to-run-flower-using-docker.rst:35 -msgid "Quickstart" -msgstr "빠른 시작" - -#: ../../source/how-to-run-flower-using-docker.rst:37 -msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "Flower를 사용해보고 싶다면 다음 명령을 사용하면 됩니다:" +":code:`min_available_clients`: federated 평가 단계를 시작하기 전에 서버에 연결해야 하는 최소 " +"클라이언트 수를 정의하는 :code:`int`입니다. 서버에 연결된 클라이언트가 " +":code:`min_available_clients`보다 적으면 서버는 더 많은 클라이언트가 연결될 때까지 기다렸다가 평가를 위한 " +"클라이언트 샘플링을 계속합니다." -#: ../../source/how-to-run-flower-using-docker.rst:43 +#: ../../source/explanation-federated-evaluation.rst:108 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker Hub. " -"The tag specifies the Flower version. In this case, Flower 1.8.0. The ``--" -"rm`` flag tells Docker to remove the container after it exits." +":code:`on_evaluate_config_fn`: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" -"이 명령은 Docker Hub에서 ``1.8.0`` 태그가 있는 Docker 이미지를 가져옵니다. " -"이 태그는 Flower 버전을 지정합니다. 이 경우, Flower 1.8.0입니다. '`--rm`` 플" -"래그는 컨테이너가 종료된 후 컨테이너를 제거하도록 Docker에 지시합니다." +"code:`on_evaluate_config_fn`: 선택한 클라이언트로 전송할 구성 사전을 반환하는 함수입니다. 이 함수는 각 " +"단계 중에 호출되며, 서버 측에서 클라이언트 측 평가를 사용자 지정하는 편리한 방법을 제공합니다(예: 수행되는 유효성 검사 단계 수" +" 구성)." -#: ../../source/how-to-run-flower-using-docker.rst:49 -msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container starts. " -"We will show below how to save the state in a file on your host system." -msgstr "" -"기본적으로 Flower SuperLink는 상태를 in-memory에 유지합니다. 
Docker 플래그 " -"`--rm``을 사용하는 경우 컨테이너 시작 사이에 상태가 유지되지 않습니다. 아래에" -"서 호스트 시스템의 파일에 상태를 저장하는 방법을 보여드리겠습니다." +#: ../../source/explanation-federated-evaluation.rst:135 +msgid "Evaluating Local Model Updates During Training" +msgstr "훈련 중 로컬 모델 업데이트 평가" -#: ../../source/how-to-run-flower-using-docker.rst:53 +#: ../../source/explanation-federated-evaluation.rst:137 msgid "" -"The ``-p :`` flag tells Docker to map the ports ``9091``/" -"``9092`` of the host to ``9091``/``9092`` of the container, allowing you to " -"access the Driver API on ``http://localhost:9091`` and the Fleet API on " -"``http://localhost:9092``. Lastly, any flag that comes after the tag is " -"passed to the Flower SuperLink. Here, we are passing the flag ``--insecure``." +"Model parameters can also be evaluated during training. " +":code:`Client.fit` can return arbitrary evaluation results as a " +"dictionary:" msgstr "" -"``-p :`` 플래그는 호스트의 포트 ``9091``/``9092``를 컨테이너" -"의 ``9091``/``9092``에 매핑하여 ``http://localhost:9091``의 드라이버 API와 " -"``http://localhost:9092``의 Fleet API에 액세스할 수 있도록 Docker에 지시합니" -"다. 마지막으로, 태그 뒤에 오는 모든 플래그는 Flower SuperLink에 전달됩니다. " -"여기서는 ``--insecure``플래그를 전달합니다." +"모델 파라미터는 훈련 중에도 평가할 수 있습니다. :code:`Client.fit`은 임의의 평가 결과를 dictionary로 " +"반환할 수 있습니다:" + +#: ../../source/explanation-federated-evaluation.rst:177 +msgid "Full Code Example" +msgstr "전체 코드 예제" -#: ../../source/how-to-run-flower-using-docker.rst:60 -#: ../../source/how-to-run-flower-using-docker.rst:259 -#: ../../source/how-to-run-flower-using-docker.rst:376 +#: ../../source/explanation-federated-evaluation.rst:179 msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly recommend " -"enabling `SSL `__ when deploying to a " -"production environment." +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" -"``--insecure`` 플래그는 안전하지 않은 통신(HTTPS가 아닌 HTTP 사용)을 활성화하" -"며 테스트 목적으로만 사용해야 합니다. 프로덕션 환경에 배포할 때는 `SSL " -"`__을 활성화할 것을 강력히 권장합니" -"다." +"연합 평가와 중앙 집중식 평가를 모두 사용하는 전체 코드 예제는 *고급 텐서플로우 예제*(다른 프레임워크에서 구현된 워크로드에도 " +"동일한 접근 방식을 적용할 수 있음)를 참조하세요: " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-run-flower-using-docker.rst:65 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." msgstr "" -"'`--help``을 사용하면 SuperLink가 지원하는 모든 플래그를 볼 수 있습니다:" -#: ../../source/how-to-run-flower-using-docker.rst:72 -msgid "Mounting a volume to store the state on the host system" -msgstr "호스트 시스템에 상태를 저장할 볼륨 마운트하기" +#: ../../source/explanation-flower-architecture.rst:3 +msgid "Flower Architecture" +msgstr "Flower 아키텍처" -#: ../../source/how-to-run-flower-using-docker.rst:74 +#: ../../source/explanation-flower-architecture.rst:5 msgid "" -"If you want to persist the state of the SuperLink on your host system, all " -"you need to do is specify a directory where you want to save the file on " -"your host system and a name for the database file. 
By default, the SuperLink " -"container runs with a non-root user called ``app`` with the user ID " -"``49999``. It is recommended to create new directory and change the user ID " -"of the directory to ``49999`` to ensure the mounted directory has the proper " -"permissions. If you later want to delete the directory, you can change the " -"user ID back to the current user ID by running ``sudo chown -R $USER:$(id -" -"gn) state``." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -"호스트 시스템에서 SuperLink의 상태를 유지하려면 호스트 시스템에서 파일을 저장" -"할 디렉터리와 데이터베이스 파일의 이름을 지정하기만 하면 됩니다. 기본적으로 " -"SuperLink 컨테이너는 사용자 ID가 ``49999``인 ``app``이라는 루트가 아닌 사용자" -"로 실행됩니다. 마운트된 디렉터리에 적절한 권한이 있는지 확인하려면 새 디렉터" -"리를 생성하고 디렉터리의 사용자 ID를 ``49999``로 변경하는 것이 좋습니다. 나중" -"에 디렉터리를 삭제하려면 ``sudo chown -R $USER:$(id -gn) state``를 실행하여 " -"사용자 ID를 현재 사용자 ID로 다시 변경할 수 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:82 +#: ../../source/explanation-flower-architecture.rst:8 msgid "" -"In the example below, we create a new directory, change the user ID and tell " -"Docker via the flag ``--volume`` to mount the local ``state`` directory into " -"the ``/app/state`` directory of the container. Furthermore, we use the flag " -"``--database`` to specify the name of the database file." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -"아래 예에서는 새 디렉터리를 생성하고, 사용자 ID를 변경하고, 플래그 ``--" -"volume``을 통해 Docker에게 로컬 ``state`` 디렉터리를 컨테이너의 ``/app/" -"state`` 디렉터리에 마운트하도록 지시합니다. 또한 ``--database`` 플래그를 사용" -"하여 데이터베이스 파일의 이름을 지정합니다." - -#: ../../source/how-to-run-flower-using-docker.rst:95 -msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the SuperLink " -"with an empty database, simply remove the ``state.db`` file." -msgstr "" -"SuperLink가 시작되자마자 호스트 시스템의 ``state`` 디렉터리에 ``state.db`` 파" -"일이 생성됩니다. 파일이 이미 존재하는 경우 SuperLink는 파일에서 상태를 복원하" -"려고 시도합니다. 빈 데이터베이스로 SuperLink를 시작하려면 ``state.db`` 파일" -"을 제거하면 됩니다." - -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 -msgid "Enabling SSL for secure connections" -msgstr "보안 연결을 위한 SSL 사용 설정" -#: ../../source/how-to-run-flower-using-docker.rst:102 +#: ../../source/explanation-flower-architecture.rst:12 msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-encoded " -"private key and a PEM-encoded certificate chain." +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -"SSL을 사용하려면 PEM으로 인코딩된 루트 인증서, PEM으로 인코딩된 개인 키 및 " -"PEM으로 인코딩된 인증서 체인이 필요합니다." -#: ../../source/how-to-run-flower-using-docker.rst:106 -msgid "" -"For testing purposes, you can generate your own self-signed certificates. " -"The `Enable SSL connections `__ page contains a section that will " -"guide you through the process." +#: ../../source/explanation-flower-architecture.rst:16 +msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -"테스트 목적으로 자체 서명된 인증서를 생성할 수 있습니다. 'SSL 연결 사용 " -"`__ 페이지에 프로세스를 안내하는 섹션이 있습니다." 
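The evaluation-related arguments described earlier (:code:`fraction_evaluate`, :code:`min_evaluate_clients`, :code:`min_available_clients`, :code:`on_evaluate_config_fn`) are plain keyword arguments of the built-in strategies. A server-side sketch that wires them into :code:`FedAvg` could look like the following; the keyword names follow the Flower 1.x API, so double-check them against the version you have installed::

    # server.py sketch: configuring federated evaluation from the server side.
    import flwr as fl

    def evaluate_config(server_round: int):
        # Configuration dictionary sent to every client selected for evaluation,
        # e.g. to control the number of local validation steps per round.
        return {"val_steps": 5 if server_round < 4 else 10}

    strategy = fl.server.strategy.FedAvg(
        fraction_evaluate=0.1,       # sample 10% of connected clients for evaluation
        min_evaluate_clients=20,     # ...but never fewer than 20 clients
        min_available_clients=100,   # wait until at least 100 clients are connected
        on_evaluate_config_fn=evaluate_config,
    )

    fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=strategy,
    )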
- -#: ../../source/how-to-run-flower-using-docker.rst:110 -msgid "" -"Assuming all files we need are in the local ``certificates`` directory, we " -"can use the flag ``--volume`` to mount the local directory into the ``/app/" -"certificates/`` directory of the container. This allows the SuperLink to " -"access the files within the container. The ``ro`` stands for ``read-only``. " -"Docker volumes default to ``read-write``; that option tells Docker to make " -"the volume ``read-only`` instead. Finally, we pass the names of the " -"certificates and key file to the SuperLink with the ``--ssl-ca-certfile``, " -"``--ssl-certfile`` and ``--ssl-keyfile`` flag." -msgstr "" -"필요한 모든 파일이 로컬``certificates`` 디렉터리에 있다고 가정하면, ``--" -"volume``플래그를 사용하여 로컬 디렉터리를 컨테이너의 ``/app/certificates/`` " -"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperLink 가 컨테이너 내의 파일" -"에 액세스할 수 있습니다. ``ro``는 ``read-only``을 의미합니다. Docker 볼륨은 " -"기본적으로 ``read-write``로 설정되어 있는데, 이 옵션을 사용하면 볼륨을 " -"``read-only``으로 만들 수 있습니다. 마지막으로 인증서 및 키 파일의 이름을 " -"``--ssl-ca-certfile``, ``--ssl-certfile`` 및 ``--ssl-keyfile`` 플래그와 함께 " -"SuperLink에 전달합니다." - -#: ../../source/how-to-run-flower-using-docker.rst:128 -msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, the " -"mounted files and directories must have the proper permissions for the user " -"ID ``49999``. For example, to change the user ID of all files in the " -"``certificates/`` directory, you can run ``sudo chown -R 49999:49999 " -"certificates/*``." -msgstr "" -"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트" -"된 파일과 디렉터리에 사용자 ID ``49999``에 대한 적절한 권한이 있어야 합니다. " -"예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 사용자 ID를 변경하려" -"면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." - -#: ../../source/how-to-run-flower-using-docker.rst:134 -msgid "Flower SuperNode" -msgstr "Flower SuperNode" -#: ../../source/how-to-run-flower-using-docker.rst:136 -msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower and " -"serves as a base for building your own SuperNode image." -msgstr "" -"SuperNode Docker 이미지는 Flower의 사전 설치된 버전과 함께 제공되며, 자체 " -"SuperNode 이미지를 구축하기 위한 기반 역할을 합니다." +#: ../../source/explanation-flower-architecture.rst:24 +#, fuzzy +msgid "Hub-and-spoke topology in federated learning" +msgstr "연합 학습이란 무엇입니까?" -#: ../../source/how-to-run-flower-using-docker.rst:141 +#: ../../source/explanation-flower-architecture.rst:24 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) gets " -"released (ETA: May). A SuperNode nightly image must be paired with the " -"corresponding SuperLink and ServerApp nightly images released on the same " -"day. To ensure the versions are in sync, using the concrete tag, e.g., " -"``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -"SuperNode Docker 이미지는 현재 1.9.0 야간 릴리스에서만 작동합니다. 안정 버전" -"은 Flower 1.9.0(안정)이 출시되면 사용할 수 있습니다(예상 출시일: 5월). " -"SuperNode 야간 이미지는 같은 날 릴리스된 해당 SuperLink 및 서버앱 야간 이미지" -"와 페어링되어야 합니다. 버전이 동기화되도록 하려면 ``nightly`` 대신 ``1.9.0." -"dev20240501``과 같은 구체적인 태그를 사용하는 것이 좋습니다." -#: ../../source/how-to-run-flower-using-docker.rst:147 +#: ../../source/explanation-flower-architecture.rst:26 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the " -"Flower repository, to illustrate how you can dockerize your ClientApp." 
+"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -"Flower 레포지토리에서 찾을 수 있는 ``quickstart-pytorch`` 예제를 사용하여 " -"ClientApp을 도커라이즈하는 방법을 설명하겠습니다." -#: ../../source/how-to-run-flower-using-docker.rst:155 +#: ../../source/explanation-flower-architecture.rst:31 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run your " -"ClientApp instead of the ``quickstart-pytorch`` example." +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -"시작하기 전에 로컬 개발 환경에서 몇 가지 전제 조건을 충족해야 합니다. " -"'quickstart-pytorch' 예제 대신 ClientApp을 실행하려는 경우 첫 번째 부분을 건" -"너뛸 수 있습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:159 -msgid "Clone the Flower repository." -msgstr "플라워 레포지토리를 클론합니다." - -#: ../../source/how-to-run-flower-using-docker.rst:173 -msgid "Creating a SuperNode Dockerfile" -msgstr "SuperNode Dockerfile 만들기" - -#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -msgid "Let's assume the following project layout:" -msgstr "다음과 같은 프로젝트 레이아웃을 가정해 보겠습니다:" -#: ../../source/how-to-run-flower-using-docker.rst:184 -msgid "" -"First, we need to create a ``requirements.txt`` file in the directory where " -"the ``ClientApp`` code is located. In the file, we list all the dependencies " -"that the ClientApp requires." +#: ../../source/explanation-flower-architecture.rst:36 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -"먼저 ``ClientApp`` 코드가 있는 디렉토리에 ``requirements.txt`` 파일을 " -"만들어야 합니다. 이 파일에는 클라이언트 앱에 필요한 모든 의존성을 나열합니다." -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/explanation-flower-architecture.rst:38 msgid "" -"Note that `flwr `__ is already installed in " -"the ``flwr/supernode`` base image, so you only need to include other package " -"dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." msgstr "" -"`flwr `__ 는 이미 ``flwr/supernode`` 기본 이" -"미지에 설치되어 있으므로, ``torch``, ``tensorflow`` 등과 같은 다른 패키지 " -"dependencies만 ``requirements.txt``에 포함시키면 됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:200 +#: ../../source/explanation-flower-architecture.rst:41 msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` example, " -"create a new file called ``Dockerfile.supernode`` in ``examples/quickstart-" -"pytorch``." +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -"다음으로, Dockerfile을 생성합니다.``quickstart-pytorch`` 예제를 사용하는 경" -"우 ``examples/quickstart-pytorch``에 ``Dockerfile.supernode``라는 새 파일을 " -"생성합니다." 
-#: ../../source/how-to-run-flower-using-docker.rst:203 -msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +#: ../../source/explanation-flower-architecture.rst:47 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -"``Dockerfile.supernode``에는 SuperNode 이미지를 조립하는 지침이 포함되어 있습" -"니다." -#: ../../source/how-to-run-flower-using-docker.rst:217 +#: ../../source/explanation-flower-architecture.rst:49 msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image tagged " -"``nightly`` as a base image and set our working directory to ``/app``. The " -"following instructions will now be executed in the ``/app`` directory. Next, " -"we install the ClientApp dependencies by copying the ``requirements.txt`` " -"file into the image and run ``pip install``. In the last two lines, we copy " -"the ``client.py`` module into the image and set the entry point to ``flower-" -"client-app`` with the argument ``client:app``. The argument is the object " -"reference of the ClientApp (``:``) that will be run " -"inside the ClientApp." +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -"처음 두 줄에서는 ``nightly`` 태그가 붙은 SuperNode 이미지를 기본 이미지로 사" -"용하고 작업 디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제 ``/" -"app`` 디렉토리에서 다음 명령이 실행됩니다. 다음으로, ``requirements.txt`` 파" -"일을 이미지에 복사하여 ClientApp dependencies 요소를 설치하고 ``pip install``" -"을 실행합니다. 마지막 두 줄에서 ``client.py`` 모듈을 이미지에 복사하고 " -"``client:app`` 인수를 사용하여 진입점을 ``flower-client-app``로 설정합니다. " -"인수는 클라이언트앱 내부에서 실행될 클라이언트앱의 객체 참조 (``:" -"``) 입니다." -#: ../../source/how-to-run-flower-using-docker.rst:226 -msgid "Building the SuperNode Docker image" -msgstr "SuperNode Docker 이미지 빌드" - -#: ../../source/how-to-run-flower-using-docker.rst:228 +#: ../../source/explanation-flower-architecture.rst:53 msgid "" -"Next, we build the SuperNode Docker image by running the following command " -"in the directory where Dockerfile and ClientApp code are located." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -"다음으로, Dockerfile 및 ClientApp 코드가 있는 디렉터리에서 다음 명령을 실행하" -"여 SuperNode Docker 이미지를 빌드합니다." -#: ../../source/how-to-run-flower-using-docker.rst:235 +#: ../../source/explanation-flower-architecture.rst:59 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -"이미지에 ``flwr_supernode``라는 이름을 붙이고 ``0.0.1`` 태그를 붙였습니다. 여" -"기서 선택한 값은 예시일 뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습" -"니다." 
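The architecture entries above describe ``ServerApp`` and ``ClientApp`` as the short-lived, project-specific parts that app developers write, while the SuperLink and SuperNodes handle the long-running networking. A minimal sketch of such a pair, assuming a Flower 1.8-era API (constructor details and the ``client_fn`` signature vary between releases):

.. code-block:: python

    from flwr.client import ClientApp, NumPyClient
    from flwr.server import ServerApp, ServerConfig
    from flwr.server.strategy import FedAvg


    class FlowerClient(NumPyClient):
        def fit(self, parameters, config):
            # Local training would happen here; parameters are echoed back
            # unchanged to keep the sketch minimal.
            return parameters, 1, {}


    def client_fn(cid: str):
        # Built on demand whenever this node is selected for a round.
        return FlowerClient().to_client()


    # Client-side, project-specific code
    app = ClientApp(client_fn=client_fn)

    # Server-side, project-specific code
    server_app = ServerApp(config=ServerConfig(num_rounds=3), strategy=FedAvg())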
- -#: ../../source/how-to-run-flower-using-docker.rst:240 -msgid "Running the SuperNode Docker image" -msgstr "SuperNode Docker 이미지 실행" -#: ../../source/how-to-run-flower-using-docker.rst:242 -msgid "Now that we have built the SuperNode image, we can finally run it." -msgstr "이제 SuperNode 이미지를 빌드했으니 이제 실행할 수 있습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "Let's break down each part of this command:" -msgstr "이 명령의 각 부분을 자세히 살펴보겠습니다:" +#: ../../source/explanation-flower-architecture.rst:71 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Flower 아키텍처" -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 -msgid "``docker run``: This is the command to run a new Docker container." -msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." +#: ../../source/explanation-flower-architecture.rst:71 +msgid "The basic Flower architecture for federated learning." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"``--rm``: This option specifies that the container should be automatically " -"removed when it stops." +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -"``--rm``: 이 옵션은 컨테이너가 중지될 때 자동으로 제거되도록 지정합니다." - -#: ../../source/how-to-run-flower-using-docker.rst:254 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "``flwr_supernode:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -msgid "``--insecure``: This option enables insecure communication." -msgstr "``--insecure``: 이 옵션은 보안되지 않은 통신을 활성화합니다." - -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:79 msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of the " -"SuperLinks Fleet" +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -"``--superlink 192.168.1.100:9092``: 이 옵션은 SuperLinks Fleet의 주소를 지정" -"합니다" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." +#: ../../source/explanation-flower-architecture.rst:82 +msgid "" +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -"API에 연결할 수 있습니다. SuperLink IP로 업데이트하는 것을 잊지 마세요." -#: ../../source/how-to-run-flower-using-docker.rst:269 +#: ../../source/explanation-flower-architecture.rst:87 msgid "" -"To test running Flower locally, you can create a `bridge network `__, use the ``--network`` argument and pass the name of the Docker " -"network to run your SuperNodes." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -"로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge network `__를 생성하고 ``--network`` argument를 사용하고 SuperNodes를 실행" -"할 Docker 네트워크의 이름을 전달하면 됩니다." 
-#: ../../source/how-to-run-flower-using-docker.rst:273 -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +#: ../../source/explanation-flower-architecture.rst:97 +msgid "Multi-tenancy federated learning architecture" msgstr "" -"태그 뒤에 오는 모든 argument는 Flower SuperNode 바이너리에 전달됩니다. " -"SuperNode가 지원하는 사용 가능한 모든 플래그를 보려면 실행하세요:" -#: ../../source/how-to-run-flower-using-docker.rst:283 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." +#: ../../source/explanation-flower-architecture.rst:97 +msgid "Multi-tenancy federated learning architecture with Flower" msgstr "" -"SSL을 사용하려면 PEM 인코딩된 루트 인증서를 SuperNode 컨테이너에 마운트해야 " -"합니다." -#: ../../source/how-to-run-flower-using-docker.rst:285 +#: ../../source/explanation-flower-architecture.rst:99 msgid "" -"Assuming the certificate already exists locally, we can use the flag ``--" -"volume`` to mount the local certificate into the container's ``/app/`` " -"directory. This allows the SuperNode to access the certificate within the " -"container. Use the ``--root-certificates`` flag when starting the container." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로" -"컬 인증서를 컨테이너의 ``/app/`` 디렉터리에 마운트할 수 있습니다. 이렇게 하" -"면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작" -"할 때 ``--root-certificates`` 플래그를 사용하세요." -#: ../../source/how-to-run-flower-using-docker.rst:297 -msgid "Flower ServerApp" -msgstr "Flower 서버앱" - -#: ../../source/how-to-run-flower-using-docker.rst:299 +#: ../../source/explanation-flower-architecture.rst:104 msgid "" -"The procedure for building and running a ServerApp image is almost identical " -"to the SuperNode image." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -"ServerApp 이미지를 빌드하고 실행하는 절차는 SuperNode 이미지와 거의 동일합니" -"다." -#: ../../source/how-to-run-flower-using-docker.rst:301 -msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a pre-" -"installed version of Flower and serves as a base for building your own " -"ServerApp image." +#: ../../source/explanation-flower-architecture.rst:113 +msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "" -"SuperNode 이미지와 마찬가지로 ServerApp Docker 이미지는 Flower의 사전 설치된 " -"버전과 함께 제공되며, 자체 ServerApp 이미지를 구축하기 위한 기본 역할을 합니" -"다." -#: ../../source/how-to-run-flower-using-docker.rst:304 +#: ../../source/explanation-flower-architecture.rst:113 msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the Flower " -"SuperNode section. If you have not already done so, please follow the " -"`SuperNode Prerequisites`_ before proceeding." +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." msgstr "" -"여기서는 Flower SuperNode 섹션에서와 동일한`quickstart-pytorch`` 예제를 사용" -"하겠습니다. 아직 수행하지 않았다면 계속 진행하기 전에 `SuperNode " -"Prerequisites`_ 을 따르세요." 
- -#: ../../source/how-to-run-flower-using-docker.rst:309 -msgid "Creating a ServerApp Dockerfile" -msgstr "ServerApp Dockerfile 만들기" -#: ../../source/how-to-run-flower-using-docker.rst:320 +#: ../../source/explanation-flower-architecture.rst:116 msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples/" -"quickstart-pytorch``." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -"먼저, ``ServerApp`` 코드가 있는 디렉토리에 Docker파일을 생성해야 합니다. " -"``quickstart-pytorch`` 예제를 사용하는 경우 ``examples/quickstart-pytorch``" -"에 ``Dockerfile.serverapp``이라는 새 파일을 생성합니다." -#: ../../source/how-to-run-flower-using-docker.rst:324 -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." +#: ../../source/explanation-flower-architecture.rst:125 +msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "" -"``Dockerfile.serverapp``에는 ServerApp 이미지를 합치는 지침이 포함되어 있습니" -"다." -#: ../../source/how-to-run-flower-using-docker.rst:335 +#: ../../source/explanation-flower-architecture.rst:125 msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image tagged " -"``1.8.0`` as a base image and set our working directory to ``/app``. The " -"following instructions will now be executed in the ``/app`` directory. In " -"the last two lines, we copy the ``server.py`` module into the image and set " -"the entry point to ``flower-server-app`` with the argument ``server:app``. " -"The argument is the object reference of the ServerApp (``:" -"``) that will be run inside the ServerApp container." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." msgstr "" -"처음 두 줄에서는 ``1.8.0`` 태그가 붙은 ServerApp 이미지를 기본 이미지로 사용" -"하고 작업 디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제 ``/" -"app`` 디렉토리에서 다음 명령이 실행됩니다. 마지막 두 줄에서는 ``server.py`` " -"모듈을 이미지에 복사하고 ``server:app`` argument를 사용하여 진입점을 " -"``flower-server-app``로 설정합니다. 인수는 ServerApp 컨테이너 내에서 실행될 " -"ServerApp의 객체 참조(``:``)입니다." - -#: ../../source/how-to-run-flower-using-docker.rst:343 -msgid "Building the ServerApp Docker image" -msgstr "ServerApp Docker 이미지 빌드" -#: ../../source/how-to-run-flower-using-docker.rst:345 +#: ../../source/explanation-flower-architecture.rst:129 msgid "" -"Next, we build the ServerApp Docker image by running the following command " -"in the directory where Dockerfile and ServerApp code are located." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -"다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하" -"여 ServerApp Docker 이미지를 빌드합니다." -#: ../../source/how-to-run-flower-using-docker.rst:352 +#: ../../source/explanation-flower-architecture.rst:132 msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. 
When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -"이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습" -"니다. 여기서 선택한 값은 예시일 뿐이라는 점을 기억하세요. 필요에 따라 변경할 " -"수 있습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:357 -msgid "Running the ServerApp Docker image" -msgstr "ServerApp Docker 이미지 실행" - -#: ../../source/how-to-run-flower-using-docker.rst:359 -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "이제 ServerApp 이미지를 빌드했으니 이제 실행할 수 있습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:371 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." -msgstr "``flwr_serverapp:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of the " -"SuperLinks Driver" +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -"``--superlink 192.168.1.100:9091``: 이 옵션은 SuperLinks 드라이버의 주소를 지" -"정합니다" -#: ../../source/how-to-run-flower-using-docker.rst:385 -msgid "" -"To test running Flower locally, you can create a `bridge network `__, use the ``--network`` argument and pass the name of the Docker " -"network to run your ServerApps." +#: ../../source/explanation-flower-architecture.rst:151 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -"로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge network `__,를 생성하고 ``--network`` argument를 사용하여 ServerApp을 실행" -"할 Docker 네트워크의 이름을 전달하면 됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:389 -msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" +#: ../../source/explanation-flower-architecture.rst:151 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -"태그 뒤에 오는 모든 argument는 Flower ServerApp 바이너리에 전달됩니다. " -"ServerApp에서 지원하는 사용 가능한 모든 플래그를 보려면 실행하세요:" -#: ../../source/how-to-run-flower-using-docker.rst:399 +#: ../../source/explanation-flower-architecture.rst:156 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -"SSL을 사용하려면 PEM 인코딩된 루트 인증서를 ServerApp 컨테이너에 마운트해야 " -"합니다." -#: ../../source/how-to-run-flower-using-docker.rst:401 +#: ../../source/explanation-flower-architecture.rst:161 +#, fuzzy msgid "" -"Assuming the certificate already exists locally, we can use the flag ``--" -"volume`` to mount the local certificate into the container's ``/app/`` " -"directory. This allows the ServerApp to access the certificate within the " -"container. Use the ``--root-certificates`` flags when starting the container." +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로" -"컬 인증서를 컨테이너의 ``/app/`` 디렉터리에 마운트할 수 있습니다. 
이렇게 하" -"면 ServerApp이 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작" -"할 때 ``--root-certificates`` 플래그를 사용하세요." +"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " +"언제든지 공유해 주세요!" -#: ../../source/how-to-run-flower-using-docker.rst:412 -msgid "Advanced Docker options" -msgstr "고급 Docker 옵션" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "FED 템플릿" -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" -msgstr "루트 사용자 권한으로 실행" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "목차" -#: ../../source/how-to-run-flower-using-docker.rst:417 -msgid "" -"Flower Docker images, by default, run with a non-root user (username/" -"groupname: ``app``, UID/GID: ``49999``). Using root user is not recommended " -"unless it is necessary for specific tasks during the build process. Always " -"make sure to run the container as a non-root user in production to maintain " -"security best practices." -msgstr "" -"기본적으로 Flower Docker 이미지는 루트 사용자가 아닌 사용자(사용자명/그룹명:" -"``app``, UID/GID: ``49999``)로 실행됩니다. 빌드 프로세스 중 특정 작업에 필요" -"한 경우가 아니라면 루트 사용자를 사용하지 않는 것이 좋습니다. 보안 모범 사례" -"를 유지하려면 항상 프로덕션 환경에서 루트 사용자가 아닌 사용자로 컨테이너를 " -"실행해야 합니다." +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[목차](#목차)" -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" -msgstr "**루트 사용자 권한으로 컨테이너 실행하기**" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[요약](#요약)" -#: ../../source/how-to-run-flower-using-docker.rst:424 -msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" -msgstr "" -"``-u`` 플래그를 사용하여 Docker 이미지를 실행하고 사용자 이름으로 ``root``를 " -"지정합니다:" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[동기](#동기)" -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." -msgstr "이 명령은 루트 사용자 권한으로 Docker 컨테이너를 실행합니다." +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[목표](#목표)" -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" -msgstr "**루트 사용자 권한으로 빌드 프로세스를 실행합니다**" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[비목표](#비목표)" -#: ../../source/how-to-run-flower-using-docker.rst:434 -msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the ``USER " -"root`` directive within your Dockerfile." -msgstr "" -"Docker 이미지 빌드 과정에서 루트 사용자로 전환하여 누락된 시스템 의존성을 " -"설치하려면 Dockerfile 내에서 ``USER root`` 지시어를 사용할 수 있습니다." 
+#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[제안](#제안)" -#: ../../source/how-to-run-flower-using-docker.rst:454 -msgid "Using a different Flower version" -msgstr "다른 Flower 버전 사용" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[단점](#단점)" -#: ../../source/how-to-run-flower-using-docker.rst:456 -msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on " -"`Docker Hub `__." -msgstr "" -"다른 버전의 Flower를 사용하려면 태그를 변경하여 사용할 수 있습니다(예: " -"Flower nightly). 사용 가능한 모든 버전은 `Docker Hub `__에 있습니다." +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[고려되는 대안](#고려되는 대안)" -#: ../../source/how-to-run-flower-using-docker.rst:460 -msgid "Pinning a Docker image to a specific version" -msgstr "특정 버전에 Docker 이미지 고정하기" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[부록](#부록)" -#: ../../source/how-to-run-flower-using-docker.rst:462 -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that you " -"always use the same image, you can specify the hash of the image instead of " -"the tag." -msgstr "" -"태그 뒤에 있는 이미지가 업데이트될 수 있습니다. 이러한 업데이트에는 " -"일반적으로 Flower의 기능을 변경해서는 안 되는 시스템 의존성에 대한 보안 " -"업데이트가 포함됩니다. 그러나 항상 동일한 이미지를 사용하려면 태그 대신 " -"이미지의 해시를 지정할 수 있습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:467 -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" -msgstr "" -"다음 명령은 ``superlink:1.8.0`` 태그가 참조하는 현재 이미지 해시를 반환합니" -"다:" - -#: ../../source/how-to-run-flower-using-docker.rst:474 -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "다음으로, 새 SuperLink 컨테이너를 실행할 때 해시를 고정할 수 있습니다:" - -#: ../../source/how-to-run-flower-using-docker.rst:483 -msgid "Setting environment variables" -msgstr "환경 변수 설정" +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "요약" -#: ../../source/how-to-run-flower-using-docker.rst:485 -msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." -msgstr "" -"Docker 컨테이너 내에서 변수를 설정하려면 ``-e =`` 플래그를 사용" -"하면 됩니다." 
+#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "\\[TODO - 문장 1: 문제 요약\\]" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "시뮬레이션 실행" +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "\\[TODO - 문장 2: 솔루션 요약\\]" -#: ../../source/how-to-run-simulations.rst:8 -msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients but " -"without having to source, configure and mange a large number of physical " -"devices; you might want to run your FL workloads as fast as possible on the " -"compute systems you have access to without having to go through a complex " -"setup process; you might want to validate your algorithm on different " -"scenarios at varying levels of data and system heterogeneity, client " -"availability, privacy budgets, etc. These are among some of the use-cases " -"where simulating FL workloads makes sense. Flower can accommodate these " -"scenarios by means of its `VirtualClientEngine `_ or VCE." -msgstr "" -"Federated 학습 워크로드 시뮬레이션은 다양한 사용 사례에 유용합니다. 대규모 클" -"라이언트 집단에서 워크로드를 실행하되 많은 수의 물리적 장치를 소싱, 구성 및 " -"관리할 필요가 없는 경우, 복잡한 설정 과정을 거치지 않고도 액세스 가능한 컴퓨" -"팅 시스템에서 최대한 빠르게 FL 워크로드를 실행하려는 경우, 다양한 수준의 데이" -"터 및 시스템 이질성, 클라이언트 가용성, 개인정보 예산 등의 다양한 시나리오에" -"서 알고리즘을 검증하려는 경우 등 여러 가지 사용 사례에 유용합니다. 이러한 사" -"례는 FL 워크로드 시뮬레이션이 적합한 사용 사례 중 일부입니다. Flower는 " -"`VirtualClientEngine `_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 있습니다." +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "동기" -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual` " -"clients. These clients are identical to `non-virtual` clients (i.e. the ones " -"you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by creating a " -"class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. In " -"addition to that, clients managed by the :code:`VirtualClientEngine` are:" -msgstr "" -":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약, 실행 및 관리합니" -"다. 이러한 클라이언트는 `non-virtual` 클라이언트(예: `flwr.client." -"start_client `_ 명령을 통해 실행하는 클라이언" -"트)와 동일하며, `flwr.client.NumPyClient `_에서 상속하는 클래스 생성으로 구성될 수 있으므로 동일한 방식으" -"로 동작합니다. 그 외에도 :code:`VirtualClientEngine`에 의해 관리되는 클라이언" -"트는 다음과 같습니다:" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" +msgstr "\\[TODO\\]" -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of the " -"compute and memory on your system. You as a user can control this at the " -"beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." 
-msgstr "" -"resource-aware: 이는 각 클라이언트가 시스템에서 컴퓨팅 및 메모리의 일부를 할" -"당받는다는 것을 의미합니다. 사용자는 시뮬레이션을 시작할 때 이를 제어할 수 있" -"으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " -"클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시" -"에 실행할 수 있습니다." +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "목표" -#: ../../source/how-to-run-simulations.rst:13 -msgid "" -"self-managed: this means that you as a user do not need to launch clients " -"manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." -msgstr "" -"self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대" -"신 :code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "목표가 아닌 것" -#: ../../source/how-to-run-simulations.rst:14 -msgid "" -"ephemeral: this means that a client is only materialized when it is required " -"in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards, releasing the resources it was " -"assigned and allowing in this way other clients to participate." -msgstr "" -"ephemeral: 이는 클라이언트가 FL 프로세스에서 필요할 때만 구체화됨을 의미합니" -"다(예: `fit() `_을 수행하기 위해). " -"객체는 나중에 소멸되어 할당된 리소스를 해제하고 다른 클라이언트가 참여할 수 " -"있도록 허용합니다." +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "제안" -#: ../../source/how-to-run-simulations.rst:16 -msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use of " -"`Actors `_ to spawn " -"`virtual` clients and run their workload." -msgstr "" -":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프" -"레임워크인 `Ray `_를 사용하여 `virtual` 클라이언트를 구" -"현합니다. 특히 Flower의 :code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생" -"성하고 해당 워크로드를 실행합니다." +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "단점" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" -msgstr "Flower 시뮬레이션 시작" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "고려되는 대안" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class, a " -"strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your simulation " -"is done with `start_simulation `_ and a minimal example looks as follows:" -msgstr "" -"Flower 시뮬레이션을 실행하려면 여전히 클라이언트 클래스, 전략 및 유틸리티 함" -"수를 정의하여 데이터 세트를 다운로드하고 로드(및 파티션)해야 합니다. 
이 작업" -"을 마친 후 시뮬레이션을 시작하려면 `start_simulation `_을 사용하면 되며, 최소한의 예시는 다음과 같습니" -"다:" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" +msgstr "\\[대안 1\\]" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" -msgstr "VirtualClientEngine 리소스" +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" +msgstr "\\[대안 2\\]" -#: ../../source/how-to-run-simulations.rst:45 -msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all " -"GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system " -"resources are used for simulation. You can do this via the :code:" -"`ray_init_args` input argument to :code:`start_simulation` which the VCE " -"internally passes to Ray's :code:`ray.init` command. For a complete list of " -"settings you can configure check the `ray.init `_ documentation. Do not set :" -"code:`ray_init_args` if you want the VCE to use all your system's CPUs and " -"GPUs." -msgstr "" -"기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU, 모든 GPU 등)에 액세스할 수 " -"있으며, 이는 Ray를 시작할 때의 기본 동작이기도 합니다. 그러나 일부 설정에서" -"는 시뮬레이션에 사용되는 시스템 리소스의 수를 제한하고 싶을 수 있습니다. 이 " -"설정은 VCE가 내부적으로 Ray의 :code:`ray.init` 명령에 전달하는 :code:" -"`start_simulation`에 대한 :code:`ray_init_args` 입력 인수를 통해 수행할 수 있" -"습니다. 구성할 수 있는 전체 설정 목록은 `ray.init `_ 설명서를 확인하세요. VCE가 " -"시스템의 모든 CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를 설정하지 " -"마세요." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Flower Enhancement Doc" -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "클라이언트 리소스 할당" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[Enhancement Doc 템플릿](#enhancement-doc-템플릿)" -#: ../../source/how-to-run-simulations.rst:63 -msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and " -"nothing else) to each virtual client. This means that if your system has 10 " -"cores, that many virtual clients can be concurrently running." -msgstr "" -"기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에 단일 CPU 코어" -"를 할당합니다(그 외에는 아무것도 할당하지 않음). 즉, 시스템에 코어가 10개인 " -"경우 그만큼 많은 가상 클라이언트를 동시에 실행할 수 있습니다." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[Metadata](#metadata)" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your " -"clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your simulation " -"by setting the argument `client_resources` to `start_simulation `_. Two keys are internally used " -"by Ray to schedule and spawn workloads (in our case Flower clients):" -msgstr "" -"대부분의 경우 FL 워크로드의 복잡성(즉, 컴퓨팅 및 메모리 사용량)에 따라 클라이" -"언트에 할당되는 리소스를 조정하고 싶을 것입니다. 시뮬레이션을 시작할 때 " -"`client_resources` argument를 `start_simulation `_로 설정하여 이를 수행할 수 있습니다. Ray는 내부" -"적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하" -"고 스폰합니다:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[워크플로우](#워크플로우)" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." -msgstr "" -":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." 
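The removed ``how-to-run-simulations`` entries above state that a simulation is launched with ``start_simulation`` and that ``client_resources`` (with keys such as ``num_cpus``) controls what each virtual client is assigned. A compact sketch of such a call, assuming the legacy ``start_simulation`` API with ``flwr[simulation]`` installed; the toy client and the resource values are illustrative:

.. code-block:: python

    import numpy as np
    import flwr as fl
    from flwr.client import NumPyClient
    from flwr.server import ServerConfig
    from flwr.server.strategy import FedAvg


    class TinyClient(NumPyClient):
        def get_parameters(self, config):
            return [np.zeros(10)]

        def fit(self, parameters, config):
            return [parameters[0] + 0.1], 100, {}


    def client_fn(cid: str):
        return TinyClient().to_client()


    history = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=100,                    # virtual clients in the federation
        config=ServerConfig(num_rounds=3),
        strategy=FedAvg(),
        # Resources reserved per virtual client; the fewer resources each
        # client gets, the more clients can run concurrently.
        client_resources={"num_cpus": 2, "num_gpus": 0.0},
        # Omit ray_init_args to let the engine use all local CPUs/GPUs.
    )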
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub Issues](#github-issues)" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." -msgstr "" -":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[Google Docs](#google-docs)" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" -msgstr "몇 가지 예를 살펴보겠습니다:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" +msgstr "Flower Enhancement는 다음과 같은 표준화된 개발 프로세스입니다" -#: ../../source/how-to-run-simulations.rst:89 -msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and having " -"orders of magnitude more `dormant` (i.e. not participating in a round) " -"clients. Let's say you want to have 100 clients per round but your system " -"can only accommodate 8 clients concurrently. The :code:`VirtualClientEngine` " -"will schedule 100 jobs to run (each simulating a client sampled by the " -"strategy) and then will execute them in a resource-aware manner in batches " -"of 8." -msgstr "" -"code:`client_resources`를 사용하여 FL 시뮬레이션의 동시성 정도를 제어할 수 있" -"지만, 동일한 라운드에서 수십, 수백 또는 수천 개의 클라이언트를 실행하고 훨씬 " -"더 많은 '휴면'(즉, 라운드에 참여하지 않는) 클라이언트를 보유하는 것을 막을 수" -"는 없습니다. 라운드당 100명의 클라이언트를 받고 싶지만 시스템이 동시에 8명의 " -"클라이언트만 수용할 수 있다고 가정해 봅시다. code:`VirtualClientEngine`은 실" -"행할 100개의 작업(각각 전략에서 샘플링한 클라이언트를 시뮬레이션)을 예약한 다" -"음 리소스 인식 방식으로 8개씩 일괄적으로 실행합니다." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" +msgstr "더 큰 변경 사항을 제안하기 위한 공통 구조를 제공합니다" -#: ../../source/how-to-run-simulations.rst:91 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a look " -"at the `Ray documentation `_." -msgstr "" -"리소스가 FL 클라이언트를 예약하는 데 사용되는 방법과 사용자 지정 리소스를 정" -"의하는 방법에 대한 모든 복잡한 세부 사항을 이해하려면 'Ray 문서 '를 참조하세요." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "변화의 동기가 분명한지 확인합니다" -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" -msgstr "시뮬레이션 예제" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" +msgstr "버전 관리 시스템에서 프로젝트 정보를 유지합니다" -#: ../../source/how-to-run-simulations.rst:96 -msgid "" -"A few ready-to-run complete examples for Flower simulation in Tensorflow/" -"Keras and PyTorch are provided in the `Flower repository `_. You can run them on Google Colab too:" -msgstr "" -"Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 몇 가지 Flower 시뮬레이" -"션 예제는 `Flower 레포지토리 `_에서 제공됩니" -"다. Google Colab에서도 실행할 수 있습니다:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" +msgstr "사용자에게 영향력 있는 변화에 대한 동기를 문서화합니다" -#: ../../source/how-to-run-simulations.rst:98 -msgid "" -"`Tensorflow/Keras Simulation `_: 100 clients collaboratively train a MLP " -"model on MNIST." 
-msgstr "" -"`Tensorflow/Keras 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 " -"MLP 모델을 훈련합니다." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "운행 중 작업 추적을 위한 깃허브 이슈를 예약합니다" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" -"파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈" -"련합니다." +"커뮤니티 참여자가 하나 이상의 릴리즈에서 변경 사항을 성공적으로 완료할 수 있도록 하는 동시에 이해 관계자가 프로세스 전반에 걸쳐 " +"적절히 대표되도록 보장합니다" -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" -msgstr "멀티 노드 Flower 시뮬레이션" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "따라서 Enhancement 문서에는 다음과 같은 측면이 결합되어 있습니다" -#: ../../source/how-to-run-simulations.rst:106 -msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations across " -"multiple compute nodes. Before starting your multi-node simulation ensure " -"that you:" -msgstr "" -"Flower의 :code:`VirtualClientEngine`을 사용하면 여러 컴퓨팅 노드에서 FL 시뮬" -"레이션을 실행할 수 있습니다. 멀티 노드 시뮬레이션을 시작하기 전에 다음 사항" -"을 확인하세요:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "기능 및 effort-tracking 문서" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." -msgstr "모든 노드에서 동일한 Python 환경을 유지합니다." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "제품 요구 사항 문서" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." -msgstr "모든 노드에 코드 사본(예: 전체 레포지토리)을 보관하세요." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "디자인 문서" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"Have a copy of your dataset in all nodes (more about this in :ref:" -"`simulation considerations `)" -msgstr "" -"모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은 :ref:`simulation " -"considerations `에서 확인하세요)" +"into one file, which is created incrementally in collaboration with the " +"community." +msgstr "를 하나의 파일로 통합하여 커뮤니티와 협력해 점진적으로 생성합니다." -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation `_ so the :code:" -"`VirtualClientEngine` attaches to a running Ray instance." +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" -":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation `_에 전달하여 :code:" -"`VirtualClientEngine`이 실행 중인 Ray 인스턴스에 연결되도록 합니다." +"Flower에 제안된 변경 사항이나 기능을 멀리 가져오는 경우, 프로젝트의 향후 변경 사항을 이해하고 전달하기 위해 단일 " +"GitHub 이슈 또는 pull request를 넘어서는 abstraction이 필요합니다." -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start --head`. 
" -"This command will print a few lines, one of which indicates how to attach " -"other nodes to the head node." +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" -"헤드 노드에서 Ray 시작: 터미널에서 :code:`ray start --head`를 입력합니다. 이 " -"명령은 몇 줄을 출력하며, 그 중 하나는 다른 노드를 헤드 노드에 연결하는 방법" -"을 나타냅니다." +"이 프로세스의 목적은 커뮤니티 내 '부족한 지식'의 양을 줄이는 것입니다. 이 프로세스는 Slack 스레드, 영상 통화, 복도 " +"대화에서 나온 의사 결정을 잘 추적된 아티팩트로 옮김으로써 커뮤니케이션과 검색 가능성을 향상시키는 것을 목표로 합니다." -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"Attach other nodes to the head node: copy the command shown after starting " -"the head and execute it on terminal of a new node: for example :code:`ray " -"start --address='192.168.1.132:6379'`" +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" -"헤드 노드에 다른 노드 연결: 헤드를 시작한 후 표시된 명령어을 복사하여 새 노드" -"의 터미널에서 실행합니다: 예: :code:`ray start --" -"address='192.168.1.132:6379'`" +"대략적으로 사용자를 대상으로 하는 대규모 개선 사항은 개선 프로세스를 따라야 합니다. 개선 사항을 작성자나 개발자 이외의 다른 " +"사람에게 서면 또는 구두로 설명해야 하는 경우에는 개선 문서 작성을 고려하세요." -#: ../../source/how-to-run-simulations.rst:115 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." msgstr "" -"위의 모든 작업이 완료되면 단일 노드에서 시뮬레이션을 실행할 때와 마찬가지로 " -"헤드 노드에서 코드를 실행할 수 있습니다." +"마찬가지로 개발 커뮤니티의 많은 부분에 영향을 미치는 기술적 노력(리팩토링, 주요 아키텍처 변경)도 널리 알려야 합니다. 개선 " +"프로세스는 일반 사용자나 운영자에게 전혀 영향을 미치지 않더라도 이를 위해 적합합니다." -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster " -"you simply need to run the command :code:`ray stop` in each node's terminal " -"(including the head node)." +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" -"시뮬레이션이 완료되면 클러스터를 해체하려면 각 노드(헤드 노드 포함)의 터미널" -"에서 :code:`ray stop` 명령을 실행하기만 하면 됩니다." - -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" +"작은 변경 및 추가의 경우, 개선 프로세스를 거치는 것은 시간이 많이 걸리고 불필요합니다. 예를 들어, 새로운 연합 학습 알고리즘을" +" 추가하는 것은 Flower의 작동 방식이나 사용 방식을 변경하지 않고 기능만 추가하는 것이기 때문입니다." 
-#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" -msgstr "" -"여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합" -"니다:" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." +msgstr "기능 개선은 이미 구현할 수 있는 경로가 마련되어 있고 커뮤니티 구성원들이 지지하는 것이므로 기능 요청과는 다릅니다." -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as " -"well as the total resources available to the :code:`VirtualClientEngine`." +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" -"사용자는 :code:`ray status`를 통해 헤드 노드에 연결된 모든 노드와 :code:" -"`VirtualClientEngine`에 사용 가능한 총 리소스를 확인할 수 있습니다." - -#: ../../source/how-to-run-simulations.rst:126 -msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, all " -"GPUs) will be visible by the head node. This means that the :code:" -"`VirtualClientEngine` can schedule as many `virtual` clients as that node " -"can possible run. In some settings you might want to exclude certain " -"resources from the simulation. You can do this by appending `--num-" -"cpus=` and/or `--num-gpus=` in any :" -"code:`ray start` command (including when starting the head)" -msgstr "" -"새 노드를 헤드에 연결하면 해당 노드의 모든 리소스(즉, 모든 CPU, 모든 GPU)가 " -"헤드 노드에 표시됩니다. 즉, :code:`VirtualClientEngine`은 해당 노드가 실행할 " -"수 있는 만큼의 `가상` 클라이언트를 예약할 수 있습니다. 일부 설정에서는 시뮬레" -"이션에서 특정 리소스를 제외하고 싶을 수 있습니다. 모든 :code:`ray start` 명령" -"(헤드 시작 시 포함)에 `--num-cpus=` 및/또는 `--num-" -"gpus=`를 추가하여 이 작업을 수행하면 됩니다" +"개선 사항은 정의된 템플릿과 참조용으로 Enhancement Doc.를 검토하고 저장하는 워크플로우를 따르는 Markdown 파일에" +" 캡처됩니다." -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" -msgstr "시뮬레이션 시 고려 사항" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "Enhancement Doc 템플릿" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"We are actively working on these fronts so to make it trivial to run any FL " -"workload with Flower simulation." -msgstr "" -"Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측" -"면에서 적극적으로 노력하고 있습니다." +"Each enhancement doc is provided as a Markdown file having the following " +"structure" +msgstr "각 개선 사항 문서는 다음과 같은 구조의 Markdown 파일로 제공됩니다" -#: ../../source/how-to-run-simulations.rst:138 -msgid "" -"The current VCE allows you to run Federated Learning workloads in simulation " -"mode whether you are prototyping simple scenarios on your personal laptop or " -"you want to train a complex FL pipeline across multiple high-performance GPU " -"nodes. While we add more capabilities to the VCE, the points below highlight " -"some of the considerations to keep in mind when designing your FL pipeline " -"with Flower. We also highlight a couple of current limitations in our " -"implementation." -msgstr "" -"현재 VCE를 사용하면 개인 노트북에서 간단한 시나리오를 프로토타이핑하든, 여러 " -"고성능 GPU 노드에서 복잡한 FL 파이프라인을 훈련하든 상관없이 시뮬레이션 모드" -"에서 Federated 학습 워크로드를 실행할 수 있습니다. VCE에 더 많은 기능을 추가" -"하는 동안, 아래에서는 Flower로 FL 파이프라인을 설계할 때 염두에 두어야 할 몇 " -"가지 사항을 강조합니다. 또한 현재 구현에서 몇 가지 제한 사항을 강조합니다." 
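The multi-node entries above describe starting Ray by hand on every machine (``ray start --head`` on the head node, ``ray start --address=...`` on the workers) and then attaching the ``VirtualClientEngine`` to that cluster. On the Python side this amounts to one extra argument; a sketch under the same assumptions as the previous snippet:

.. code-block:: python

    import numpy as np
    import flwr as fl
    from flwr.client import NumPyClient
    from flwr.server import ServerConfig


    class TinyClient(NumPyClient):
        def get_parameters(self, config):
            return [np.zeros(10)]

        def fit(self, parameters, config):
            return [parameters[0] + 0.1], 100, {}


    # Run on the head node, after `ray start --head` there and
    # `ray start --address='<head-ip>:6379'` on each worker node.
    fl.simulation.start_simulation(
        client_fn=lambda cid: TinyClient().to_client(),
        num_clients=100,
        config=ServerConfig(num_rounds=3),
        client_resources={"num_cpus": 2},
        # Attach to the already-running Ray cluster instead of starting one.
        ray_init_args={"address": "auto"},
    )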
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +msgstr "Metadata ([아래 설명](#metadata) YAML preamble 형식)" -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" -msgstr "GPU 리소스" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "Title (metadata와 같게)" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "Table of Contents (필요시)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "Notes/Constraints/Caveats (선택 사항)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "Design Details (선택 사항)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "졸업 기준" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "업그레이드/다운그레이드 전략(해당되는 경우)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." +msgstr "참고로 이 문서는 위의 구조를 따릅니다." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Metadata" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key :" -"code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." msgstr "" -"VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를 지정하는 클라이언트" -"에 GPU 메모리 공유를 할당합니다. 즉, (VCE에서 내부적으로 사용하는) Ray가 기본" -"적으로 사용됩니다:" +"**피드 번호** (필수) 마지막 Flower Enhancement 문서의 `피드 번호` + 1. 이 번호를 사용하면 다른 제안을 " +"쉽게 참조할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**제목** (필수) 제안서의 제목을 평이한 언어로 입력합니다." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you " -"set :code:`num_gpus=0.5` and you have two GPUs in your system with different " -"(e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients concurrently." -msgstr "" -"GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다. 즉, 시스템에 서로 다른(예: " -"32GB와 8GB) VRAM 용량을 가진 두 개의 GPU가 있고 :code:`num_gpus=0.5`를 설정하" -"면 둘 다 동시에 2개의 클라이언트를 실행하게 됩니다." +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." +msgstr "**상태** (필수) 제안의 현재 상태입니다. 가능한 상태는 [워크플로](#워크플로)를 참조하세요." -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" -msgstr "" -"관련 없는(즉, VCE에 의해 생성되지 않은) 다른 워크로드가 GPU에서 실행되고 있는" -"지 알지 못합니다. 여기서 두 가지 시사점을 얻을 수 있습니다:" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." +msgstr "**저자** (필수) 제안서의 작성자 목록입니다. 간단히 GitHub ID입니다." 
-#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" -msgstr "" -"집계 후 '글로벌 모델'을 평가하려면 Flower 서버에 GPU가 필요할 수 있습니다" -"(예: `evaluate method `_를 사용할 때)" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." +msgstr "**생성 날짜** (필수) PR에서 제안서를 처음 제출한 날짜입니다." -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with :code:" -"`CUDA_VISIBLE_DEVICES=\"\"` when launching your experiment." -msgstr "" -"동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을 실행하려면, 실험을 시" -"작할 때 :code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 합니다." +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." +msgstr "**마지막 업데이트** (선택 사항) 제안서가 마지막으로 크게 변경된 날짜입니다." -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` are " -"not `enforced` (i.e. they can be exceeded) which can result in the situation " -"of client using more VRAM than the ratio specified when starting the " -"simulation." -msgstr "" -"또한 :code:`client_resources`에 전달된 GPU 리소스 제한이 '강제'되지 않아(즉, " -"초과할 수 있음) 클라이언트가 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 " -"VRAM을 사용하는 상황이 발생할 수 있습니다." +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." +msgstr "**함께 보기** (선택 사항) 이 제안과 관련된 다른 제안 목록입니다." -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" -msgstr "GPU를 사용한 TensorFlow" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**대체** (선택 사항) 이 제안이 대체하는 제안 목록입니다." -#: ../../source/how-to-run-simulations.rst:158 -msgid "" -"When `using a GPU with TensorFlow `_ " -"nearly your entire GPU memory of all your GPUs visible to the process will " -"be mapped. This is done by TensorFlow for optimization purposes. However, in " -"settings such as FL simulations where we want to split the GPU into multiple " -"`virtual` clients, this is not a desirable mechanism. Luckily we can disable " -"this default behavior by `enabling memory growth `_." -msgstr "" -"`TensorFlow와 함께 GPU를 사용 `_하면 프" -"로세스에 보이는 모든 GPU의 거의 전체 GPU 메모리가 매핑됩니다. 이는 최적화 목" -"적으로 TensorFlow에서 수행됩니다. 그러나 GPU를 여러 개의 '가상' 클라이언트로 " -"분할하려는 FL 시뮬레이션과 같은 설정에서는 이는 바람직한 메커니즘이 아닙니" -"다. 다행히도 '메모리 증가 활성화 `_'를 통해 이 기본 동작을 비활성화할 수 있습니" -"다." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +msgstr "**대체됨** (선택 사항) 이 제안이 대체하는 제안의 목록입니다." -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "워크플로우" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"This would need to be done in the main process (which is where the server " -"would run) and in each Actor created by the VCE. By means of :code:" -"`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` in order " -"to specify a function to be executed upon actor initialization. In this " -"case, to enable GPU growth for TF workloads. 
It would look as follows:" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" -"이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서 생성한 각 액터에서 수행" -"해야 합니다. :code:`actor_kwargs`를 통해 예약 키 `\"on_actor_init_fn\"`을 전" -"달하여 액터 초기화 시 실행할 함수를 지정할 수 있습니다. 이 경우 TF 워크로드" -"에 대한 GPU 증가를 활성화합니다. 다음과 같이 보입니다:" +"개선 사항을 구성하는 아이디어는 이미 커뮤니티에서 논의되었거나 제안된 적이 있어야 합니다. 따라서 개선 사항을 주도하는 사(보통 " +"작성자)이 필요합니다. 이 사람은 또한 제안을 검토할 의향이 있는 Flower 커미터를 찾아야 합니다." -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ " -"example." +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." msgstr "" -"이것이 바로`Tensorflow/Keras Simulation `_ 예제에서 사용된 메커니즘입니다." +"새 개선 사항은 `NNNN-YYYYMMDD-enhancement-title.md` 형식의 파일 이름으로 체크인되며, `NNNN`은 " +"Flower 개선 문서 번호이고 `enhancements`에 해당합니다. 모든 개선 사항은 pull request의 일부로 `잠정`" +" 상태에서 시작됩니다. 토론은 pull request 검토의 일부로 이루어집니다." -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" -msgstr "멀티 노드 설정" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +msgid "" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." +msgstr "" +"개선 사항이 검토 및 승인되면 상태가 '구현 가능'으로 변경됩니다. 그런 다음 실제 구현은 별도의 pull requests를 통해 " +"이루어집니다. 이러한 pull requests는 설명의 일부로 해당 개선 사항을 언급해야 합니다. 구현이 완료되면 제안 상태는 " +"'구현됨'으로 변경됩니다." -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"The VCE does not currently offer a way to control on which node a particular " -"`virtual` client is executed. In other words, if more than a single node " -"have the resources needed by a client to run, then any of those nodes could " -"get the client workload scheduled onto. Later in the FL process (i.e. in a " -"different round) the same client could be executed by a different node. " -"Depending on how your clients access their datasets, this might require " -"either having a copy of all dataset partitions on all nodes or a dataset " -"serving mechanism (e.g. using nfs, a database) to circumvent data " -"duplication." -msgstr "" -"VCE는 현재 특정 '가상' 클라이언트를 어느 노드에서 실행할지 제어하는 방법을 제" -"공하지 않습니다. 즉, 클라이언트가 실행하는 데 필요한 리소스가 하나 이상의 노" -"드에 있는 경우 해당 노드 중 어느 노드에나 클라이언트 워크로드가 예약될 수 있" -"습니다. FL 프로세스 후반부(즉, 다른 라운드에서)에는 동일한 클라이언트가 다른 " -"노드에서 실행될 수 있습니다. 클라이언트가 데이터 세트에 액세스하는 방식에 따" -"라 모든 노드에 모든 데이터 세트 파티션의 복사본을 보유하거나 데이터 중복을 피" -"하기 위해 데이터 세트 제공 메커니즘(예: nfs, 데이터베이스 사용)을 사용해야 " -"할 수 있습니다." +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" +msgstr "특정 조건에서는 다른 상태도 가능합니다. 
개선에는 다음과 같은 상태가 있습니다:" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral nature. " -"A client state can be implemented as part of the Flower client class but " -"users need to ensure this saved to persistent storage (e.g. a database, " -"disk) and that can be retrieve later by the same client regardless on which " -"node it is running from. This is related to the point above also since, in " -"some way, the client's dataset could be seen as a type of `state`." -msgstr "" -"정의상 가상 클라이언트는 임시적 특성으로 인해 '상태 없음'입니다. 클라이언트 " -"상태는 Flower 클라이언트 클래스의 일부로 구현할 수 있지만, 사용자는 이를 영" -"구 저장소(예: 데이터베이스, 디스크)에 저장하여 나중에 실행 중인 노드와 관계없" -"이 동일한 클라이언트가 검색할 수 있도록 해야 합니다. 이는 어떤 식으로든 클라" -"이언트의 데이터 세트가 일종의 '상태'로 볼 수 있기 때문에 위의 요점과도 관련" -"이 있습니다." +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." +msgstr "'잠정적': 개선 사항이 제안되어 활발히 정의되고 있습니다. 제안이 구체화되고 활발하게 정의 및 논의되는 동안의 시작 단계입니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" -msgstr "모델 체크포인트 저장 및 로드" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`구현 가능`: 개선 사항이 검토 및 승인되었습니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"Flower does not automatically save model updates on the server-side. This " -"how-to guide describes the steps to save (and load) model checkpoints in " -"Flower." -msgstr "" -"Flower는 서버 측에서 모델 업데이트를 자동으로 저장하지 않습니다. 이 사용법 가" -"이드에서는 Flower에서 모델 체크포인트를 저장(및 로드)하는 단계에 대해 설명합" -"니다." +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." +msgstr "`구현됨`: 개선 사항이 구현되었으며 더 이상 활발히 변경되지 않습니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" -msgstr "모델 체크포인트" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgstr "'지연됨': 개선 사항이 제안되었지만 아직 활발히 작업 중이 아닙니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"Model updates can be persisted on the server-side by customizing :code:" -"`Strategy` methods. Implementing custom strategies is always an option, but " -"for many cases it may be more convenient to simply customize an existing " -"strategy. The following code example defines a new :code:`SaveModelStrategy` " -"which customized the existing built-in :code:`FedAvg` strategy. In " -"particular, it customizes :code:`aggregate_fit` by calling :code:" -"`aggregate_fit` in the base class (:code:`FedAvg`). It then continues to " -"save returned (aggregated) weights before it returns those aggregated " -"weights to the caller (i.e., the server):" -msgstr "" -":code:`Strategy` 메소드를 사용자 지정하여 서버 측에서 모델 업데이트를 지속할 " -"수 있습니다. 사용자 지정 전략을 구현하는 것은 항상 옵션이지만 대부분의 경우 " -"기존 전략을 간단히 사용자 지정하는 것이 더 편리할 수 있습니다. 다음 코드 예시" -"는 기존의 기본 제공 :code:`FedAvg` 전략을 사용자 지정한 새로운 :code:" -"`SaveModelStrategy`를 정의합니다. 특히, 기본 클래스(:code:`FedAvg`)에서 :" -"code:`aggregate_fit`을 호출하여 :code:`aggregate_fit`을 사용자 지정합니다. 
그" -"런 다음 호출자(즉, 서버)에게 집계된 가중치를 반환하기 전에 반환된(집계된) 가" -"중치를 계속 저장합니다:" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." +msgstr "`거부됨`: 작성자와 검토자는 이 개선 사항을 더 이상 진행하지 않기로 결정했습니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" -msgstr "파이토치 체크포인트 저장 및 로드" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`철회`: 작성자가 개선 사항을 철회했습니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "'대체됨': 개선 사항이 새로운 개선 사항으로 대체되었습니다." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"Similar to the previous example but with a few extra steps, we'll show how " -"to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be " -"transformed into a list of NumPy ``ndarray``'s, then those are transformed " -"into the PyTorch ``state_dict`` following the ``OrderedDict`` class " -"structure." +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" -"이전 예제와 비슷하지만 몇 가지 단계가 추가되어 ``torch.save`` 함수를 사용하" -"여 파이토치 체크포인트를 저장하는 방법을 보여드리겠습니다. 먼저, " -"``aggregate_fit``은 ``Parameters`` 객체를 반환하는데, 이 객체는 NumPy " -"``ndarray``의 목록으로 변환되어야 하며, ``OrderedDict`` 클래스 구조에 따라 파" -"이토치 ``state_dict``로 변환됩니다." +"GitHub에서 이미 제공하는 프로세스(이슈 및 Pull Requests)에 추가 프로세스를 추가하면 더 복잡해지고 잠재적인 처음인" +" 기여자에게는 장벽이 될 수 있습니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"To load your progress, you simply append the following lines to your code. " -"Note that this will iterate over all saved checkpoints and load the latest " -"one:" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" -"진행 상황을 로드하려면 코드에 다음 줄을 추가하기만 하면 됩니다. 이렇게 하면 " -"저장된 모든 체크포인트를 반복하고 최신 체크포인트를 로드합니다:" +"현재 기능 이슈 템플릿에서 요구되는 한 문장 설명 이상으로 제안서 템플릿을 확장하는 것은 영어가 모국어가 아닌 사용자에게는 큰 " +"부담이 될 수 있습니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "GitHub 이슈" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as in " -"the ``initial_parameters`` when defining a ``Strategy``." -msgstr "" -"``전략``을 정의할 때 ``초기_파라미터``와 같이 필요한 경우 ``파라미터`` 유형" -"의 이 객체를 반환/사용합니다." +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." +msgstr "" +"이러한 종류의 개선을 위해 GitHub 이슈를 사용하면 가능합니다. 예를 들어 태그를 사용하여 다른 이슈와 구별하고 필터링할 수 " +"있습니다. 
주요 이슈는 개선 사항에 대해 토론하고 검토하는 것입니다: GitHub 이슈에는 댓글 스레드가 하나만 있습니다. 개선 " +"사항에는 일반적으로 문서의 여러 부분에 대해 동시에 여러 개의 토론 스레드가 있습니다. GitHub 이슈를 사용할 때 이러한 여러 " +"토론을 관리하면 혼란스러울 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "Flower 1.0으로 업그레이드" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "Google 문서 도구" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable " -"foundation for future growth. Compared to Flower 0.19 (and other 0.x series " -"releases), there are a few breaking changes that make it necessary to change " -"the code of existing 0.x-series projects." +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." msgstr "" -"Flower 1.0이 출시되었습니다. 새로운 기능과 함께 Flower 1.0은 향후 성장을 위" -"한 안정적인 기반을 제공합니다. Flower 0.19(및 다른 0.x 시리즈 릴리스)와 비교" -"했을 때 기존 0.x 시리즈 프로젝트의 코드를 변경해야 하는 몇 가지 획기적인 변" -"경 사항이 있습니다." +"Google 문서는 여러 스레드의 토론을 허용합니다. 하지만 Google 문서는 프로젝트 외부에서 호스팅되므로 커뮤니티에서 검색할 " +"수 있도록 관리해야 합니다. 모든 제안에 대한 링크 목록을 관리하고 커뮤니티에 제공해야 합니다. Flower 저장소의 일부로 " +"제안서를 보낼 때와 비교하면 링크가 누락될 가능성이 훨씬 더 높습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "업데이트 설치" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Flower 개선 문서" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "종합 평가 결과" + +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either pip " -"or Poetry:" -msgstr "" -"다음은 pip 또는 Poetry를 사용하여 기존 설치를 Flower 1.0으로 업데이트하는 방" -"법입니다:" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." +msgstr "Flower 서버는 평가 결과를 집계하는 방법을 규정하고 있지 않지만 사용자가 결과 집계를 완전히 사용자 지정할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "pip: 설치할 때 ``-U``를 추가합니다." +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "사용자 지정 평가 결과 집계" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +"The same :code:`Strategy`-customization approach can be used to aggregate" +" custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" -"``python -m pip install -U flwr``(``start_server`` 및 ``start_client``를 사용" -"하는 경우)" +"동일한 :code:`Strategy`-사용자 지정 방식을 사용하여 개별 클라이언트로부터 오는 사용자 지정 평가 결과를 집계할 수 " +"있습니다. 
클라이언트는 dictionary를 반환하여 사용자 지정 지표를 서버에 반환할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-aggregate-evaluation-results.rst:36 msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" -msgstr "" -"``python -m pip install -U 'flwr[simulation]'``(``start_simulation`` 사용 시)" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "그런 다음 서버는 사용자 지정 전략을 사용하여 이러한 dictionaries에서 제공하는 메트릭을 집계할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" +msgstr "SuperNodes 인증하기" + +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -"Poetry: ``pyproject.toml``에서 ``flwr`` dependency을 업데이트한 다음 다시 설" -"치하세요(``poetry 설치``를 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry." -"lock``을 삭제하는 것을 잊지 마세요)." +"Flower는 SuperLink에 연결하는 각 SuperNodes의 신원을 확인하는 데 사용할 수 있는 인증된 SuperNodes에" +" 대한 기본 지원을 제공합니다. Flower 노드 인증은 GitHub SSH 인증 방식과 유사하게 작동합니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-authenticate-supernodes.rst:7 +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "SuperLink(서버)는 알려진 (클라이언트) 노드 공개키 목록을 저장합니다" + +#: ../../source/how-to-authenticate-supernodes.rst:8 msgid "" -"``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" -msgstr "``flwr = \"^1.0.0\"``(``start_server`` 및 ``start_client`` 사용 시)" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "SuperNode와 SuperLink는 ECDH를 사용하여 독립적으로 공유된 비밀을 도출합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when using " -"``start_simulation``)" -msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` " -"(``start_simulation`` 사용 시)" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" +msgstr "비밀 공유는 SuperNode에서 SuperLink로 토큰으로 전송된 메시지의 HMAC 값을 계산하는 데 사용됩니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "필수 변경 사항" +#: ../../source/how-to-authenticate-supernodes.rst:10 +msgid "SuperLink verifies the token" +msgstr "SuperLink가 토큰을 확인합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." -msgstr "다음과 같은 주요 변경 사항에는 수동 업데이트가 필요합니다." +#: ../../source/how-to-authenticate-supernodes.rst:12 +msgid "" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." +msgstr "" +"인증된 환경에서 Flower로 연합 학습을 시연하는 전체 '코드 예제 " +"`_를 확인하는 것이 좋습니다." 
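The authentication flow summarized in the steps above (a known-keys list, an ECDH-derived shared secret, and an HMAC token) can be sketched in a few lines of Python. This is only a conceptual illustration built on the third-party `cryptography` package; the curve, hash, and message framing are assumptions made for the example, not Flower's internal implementation:

```python
import hashlib
import hmac

from cryptography.hazmat.primitives.asymmetric import ec

# Each party holds its own EC key pair; the public halves are what the
# SuperLink knows from its CSV list and what the SuperNode knows about
# the SuperLink.
node_key = ec.generate_private_key(ec.SECP384R1())
link_key = ec.generate_private_key(ec.SECP384R1())

# ECDH: both sides derive the same shared secret without ever sending it.
secret_on_node = node_key.exchange(ec.ECDH(), link_key.public_key())
secret_on_link = link_key.exchange(ec.ECDH(), node_key.public_key())
assert secret_on_node == secret_on_link

# The sender attaches an HMAC over the message as a token ...
message = b"example message bytes"
token = hmac.new(secret_on_node, message, hashlib.sha256).hexdigest()

# ... and the receiver recomputes it with its own copy of the secret.
expected = hmac.new(secret_on_link, message, hashlib.sha256).hexdigest()
print("token verified:", hmac.compare_digest(token, expected))
```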
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "일반" +#: ../../source/how-to-authenticate-supernodes.rst:15 +msgid "" +"This guide covers a preview feature that might change in future versions " +"of Flower." +msgstr "이 가이드에서는 향후 버전의 Flower에서 변경될 수 있는 미리보기 기능에 대해 설명합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-authenticate-supernodes.rst:18 msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" -msgstr "모든 전달인자를 위치 전달인자가 아닌 키워드 전달인자로 전달합니다. 다음은 " -"예시입니다:" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." +msgstr "보안을 강화하기 위해 노드 인증은 암호화된 연결(SSL/TLS)을 사용하도록 설정한 경우에만 사용할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-authenticate-supernodes.rst:21 +msgid "Enable node authentication in :code:`SuperLink`" +msgstr ":code:`SuperLink`에서 노드 인증 활성화" + +#: ../../source/how-to-authenticate-supernodes.rst:23 msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" -msgstr "" -"Flower 0.19 (위치 전달인자): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower " +":code:`SuperLink`. Use the following terminal command to start a Flower " +":code:`SuperNode` that has both secure connections and node " +"authentication enabled:" +msgstr "" +"노드 인증을 활성화하려면 먼저 SuperLink<>SuperNode 통신을 보호하기 위해 SSL/TLS 연결을 구성해야 합니다. " +"전체 가이드는 `여기 `_에서 확인할 수 있습니다. 보안 연결을 구성한 후, 장기 실행하는 Flower " +":code:`SuperLink`에서 클라이언트 인증을 활성화할 수 있습니다. 다음 터미널 명령을 사용하여 보안 연결과 노드 인증이 " +"모두 활성화된 Flower :code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-authenticate-supernodes.rst:38 +msgid "Let's break down the authentication flags:" +msgstr "인증 플래그를 세분화해 보겠습니다:" + +#: ../../source/how-to-authenticate-supernodes.rst:40 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", client=FlowerClient())``" +"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " +"file storing all known node public keys. You need to store all known node" +" public keys that are allowed to participate in a federation in one CSV " +"file (:code:`.csv`)." msgstr "" -"Flower 1.0 (키워드 전달인자): ``start_client(server_address=\"127.0.0.1:" -"8080\", client=FlowerClient())``" +"첫 번째 플래그 :code:`--auth-list-public-keys`는 알려진 모든 노드 공개키를 저장하는 CSV 파일의 경로를" +" 기대합니다. federation에 참여하도록 허용된 모든 알려진 노드 공개 키를 하나의 CSV 파일(:code:`.csv`)에 " +"저장해야 합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "클라이언트" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-authenticate-supernodes.rst:42 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." 
msgstr "" -"``NumPyClient``의 서브클래스: ``def get_parameters(self):``를 ``def " -"get_parameters(self, config):``로 변경합니다" +"알려진 노드 공개키를 저장하는 유효한 CSV 파일은 쉼표로 구분하고 주석 없이 OpenSSH 형식으로 키를 나열해야 합니다. 예를 " +"들어, 두 개의 알려진 노드 공개키가 포함된 CSV 파일이 포함된 코드 샘플을 참조하세요." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-authenticate-supernodes.rst:44 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"The second and third flags :code:`--auth-superlink-private-key` and :code" +":`--auth-superlink-public-key` expect paths to the server's private and " +"public keys. For development purposes, you can generate a private and " +"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." msgstr "" -"``클라이언트``의 서브클래스: ``def get_parameters(self):``를 ``def " -"get_parameters(self, ins: GetParametersIns):``로 변경합니다" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "전략 / ``start_server`` / ``start_simulation``" +"두 번째 및 세 번째 플래그 :code:`--auth-superlink-private-key` 및 :code:`--auth-" +"superlink-public-key`는 서버의 개인 및 공개 키의 경로를 예상합니다. 개발 목적으로 :code:`ssh-" +"keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-authenticate-supernodes.rst:47 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -"Dictionary 대신 ``ServerConfig``를 ``start_server`` 및 ``start_simulation``" -"에 전달합니다. 다음은 예제입니다:" +"Flower 1.9에서는 알려진 노드 공개키를 SuperLink에 동적으로 제거, 편집 또는 추가하는 기능이 지원되지 않습니다. " +"알려진 노드 집합을 변경하려면 서버를 종료하고 CSV 파일을 편집한 다음 서버를 다시 시작해야 합니다. 알려진 노드 집합을 동적으로" +" 변경하는 기능은 Flower 1.10(출시 예정일: 6월)에서 로드맵에 포함되어 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 -msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" -msgstr "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +#: ../../source/how-to-authenticate-supernodes.rst:53 +msgid "Enable node authentication in :code:`SuperNode`" +msgstr ":code:`SuperNode`에서 노드 인증을 활성화합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-authenticate-supernodes.rst:55 msgid "" -"Flower 1.0: ``start_server(..., config=flwr.server." -"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" +"Similar to the long-running Flower server (:code:`SuperLink`), you can " +"easily enable node authentication in the long-running Flower client " +"(:code:`SuperNode`). Use the following terminal command to start an " +"authenticated :code:`SuperNode`:" msgstr "" -"Flower 1.0: ``start_server(..., config=flwr.server." -"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" +"장기 실행 중인 Flower 서버(:code:`SuperLink`)와 마찬가지로, 장기 실행 중인 Flower " +"클라이언트(:code:`SuperNode`)에서도 노드 인증을 쉽게 활성화할 수 있습니다. 
다음 터미널 명령을 사용하여 인증된 " +":code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-authenticate-supernodes.rst:66 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +"The :code:`--auth-supernode-private-key` flag expects a path to the " +"node's private key file and the :code:`--auth-supernode-public-key` flag " +"expects a path to the node's public key file. For development purposes, " +"you can generate a private and public key pair using :code:`ssh-keygen -t" +" ecdsa -b 384`." msgstr "" -"``start_simulation``의 ``num_rounds=1``을 새로운 ``config=ServerConfig(...)``" -"로 바꿉니다(이전 항목 참조)" +":code:`--auth-supernode-private-key` 플래그는 노드의 개인 키 파일 경로를, :code:`--auth-" +"supernode-public-key` 플래그는 노드의 공개 키 파일 경로를 예상합니다. 개발 목적으로 :code:`ssh-" +"keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-authenticate-supernodes.rst:70 +msgid "Security notice" +msgstr "보안 공지" + +#: ../../source/how-to-authenticate-supernodes.rst:72 msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by " -"configuring the strategy to sample all clients for evaluation after the last " -"round of training." +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -"'start_server`` 호출에서 ``force_final_distributed_eval`` 매개변수를 제거합니" -"다. 모든 클라이언트에 대한 분산 평가는 마지막 훈련 라운드 후 평가를 위해 모" -"든 클라이언트를 샘플링하도록 전략을 구성하여 활성화할 수 있습니다." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "매개변수/ndarray 변환 함수의 이름을 바꿉니다:" +"시스템의 보안은 SuperLink와 각SuperNode의 자격 증명에 의존합니다. 따라서 공개키 기반구조(PKI) 사칭 공격과 같은" +" 보안 위험을 피하기 위해 자격 증명을 보호하고 안전하게 보관하는 것이 필수적입니다. 노드 인증 메커니즘에는 사람의 상호 작용도 " +"포함되므로 모든 통신이 신뢰할 수 있는 통신 방법을 사용하여 안전한 방식으로 이루어지도록 하세요." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/how-to-authenticate-supernodes.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:68 +#: ../../source/how-to-use-built-in-mods.rst:85 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "결론" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-authenticate-supernodes.rst:79 msgid "" -"Strategy initialization: if the strategy relies on the default values for " -"``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. 
Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize FedAvg " -"with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"You should now have learned how to start a long-running Flower server " +"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " +"authentication enabled. You should also know the significance of the " +"private key and store it safely to minimize security risks." msgstr "" -"전략 초기화: 전략이 ``fraction_fit`` 및 ``fraction_evaluate``의 기본값에 의존" -"하는 경우 ``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 수동 설정합니" -"다. 전략을 수동으로 생성하지 않는 프로젝트(전략 인스턴스를 전달하지 않고 " -"``start_server`` 또는 ``start_simulation``을 호출하여)는 이제 " -"``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 설정하여 FedAvg를 수동" -"으로 초기화해야 합니다." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "기본 제공 전략 매개변수의 이름을 바꿉니다(예: ``FedAvg``):" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" +"이제 노드 인증이 활성화된 상태에서 장기간 실행되는 Flower 서버(:code:`SuperLink`)와 " +"클라이언트(:code:`SuperNode`)를 시작하는 방법을 배웠을 것입니다. 또한 보안 위험을 최소화하기 위해 개인키의 중요성을" +" 알고 안전하게 보관해야 합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" +msgstr "클라이언트 구성" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." msgstr "" -"``rnd``의 이름을 ``server_round``로 바꿉니다. 이는 여러 메서드 및 함수(예: " -"``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, " -"``aggregate_evaluate`` 및 ``evaluate_fn``)에 영향을 미칩니다." +"모델 파라미터와 함께 Flower는 설정 값을 클라이언트에 전송할 수 있습니다. 구성 값은 다양한 용도로 사용할 수 있습니다. 예를" +" 들어 서버에서 클라이언트 측 하이퍼파라미터를 제어하는 데 널리 사용되는 방법입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "``server_round`` 및 ``config``를 ``evaluate_fn``에 추가합니다:" +#: ../../source/how-to-configure-clients.rst:7 +msgid "Configuration values" +msgstr "구성 값" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-configure-clients.rst:9 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, " -"Dict[str, Scalar]]]:``" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." 
+" Here is an example of a configuration dictionary in Python:"
msgstr ""
"구성 값은 ``str`` 키와 ``bool``, ``bytes``, ``double``(64비트 정밀도 부동 소수점), ``int`` 또는"
" ``str``(또는 다른 언어의 동등한 유형) 유형의 값으로 구성된 사전으로 표현됩니다. 다음은 Python의 구성 사전 "
"예제입니다:"

#: ../../source/how-to-configure-clients.rst:20
msgid ""
"Flower serializes these configuration dictionaries (or *config dict* for "
"short) to their ProtoBuf representation, transports them to the client "
"using gRPC, and then deserializes them back to Python dictionaries."
msgstr ""
"Flower는 이러한 구성 dictionaries(또는 줄여서 *config dict*)를 ProtoBuf 표현으로 직렬화하고, "
"gRPC를 사용하여 클라이언트로 전송한 다음 다시 Python dictionaries로 역직렬화합니다."

#: ../../source/how-to-configure-clients.rst:24
msgid ""
"Currently, there is no support for directly sending collection types "
"(e.g., ``Set``, ``List``, ``Map``) as values in configuration "
"dictionaries. There are several workarounds to send collections as values"
" by converting them to one of the supported value types (and converting "
"them back on the client-side)."
msgstr ""
"현재 구성 사전에서 컬렉션 유형(예: ``Set``, ``List``, ``Map``)을 값으로 직접 전송하는 기능은 지원되지 "
"않습니다. 컬렉션을 지원되는 값 유형 중 하나로 변환한 다음 클라이언트 측에서 다시 변환하여 값으로 보내는 몇 가지 해결 방법이 "
"있습니다."

#: ../../source/how-to-configure-clients.rst:26
msgid ""
"One can, for example, convert a list of floating-point numbers to a JSON "
"string, then send the JSON string using the configuration dictionary, and"
" then convert the JSON string back to a list of floating-point numbers on"
" the client."
msgstr ""
"예를 들어 부동 소수점 숫자 목록을 JSON 문자열로 변환한 다음 구성 dictionary를 사용하여 JSON 문자열을 전송한 다음"
" 클라이언트에서 다시 부동 소수점 숫자 목록으로 변환할 수 있습니다."
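To make the value-type rules and the JSON workaround described above concrete, here is a small illustrative Python sketch. The key names (`batch_size`, `use_dropout`, `optimizer`, `lr_schedule_json`) are invented for the example; only supported scalar types are placed in the dictionary directly, and the list of floats travels as a JSON string:

```python
import json

# Server side: only bool/bytes/float/int/str values go into the config dict.
lr_schedule = [0.1, 0.01, 0.001]
config = {
    "batch_size": 32,                             # int
    "use_dropout": True,                          # bool
    "optimizer": "sgd",                           # str
    "lr_schedule_json": json.dumps(lr_schedule),  # list tunnelled as a str
}

# Client side (e.g. inside fit()): decode the JSON string back into a list.
received_schedule = json.loads(config["lr_schedule_json"])
assert received_schedule == lr_schedule
```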
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 -msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +#: ../../source/how-to-configure-clients.rst:30 +msgid "Configuration through built-in strategies" +msgstr "기본 제공 전략을 통한 구성" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-configure-clients.rst:32 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -" -"> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" +"called configuration functions. A configuration function is a function " +"that the built-in strategy calls to get the configuration dictionary for " +"the current round. It then forwards the configuration dictionary to all " +"the clients selected during that round." msgstr "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -" -"> Optional[Tuple[float, Dict[str, Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "선택적 개선 사항" +"클라이언트에 구성 값을 보내는 가장 쉬운 방법은 :code:`FedAvg`와 같은 기본 제공 전략을 사용하는 것입니다. 기본 제공 " +"전략은 소위 구성 함수를 지원합니다. 구성 함수는 내장 전략이 현재 단계의 구성 사전을 가져오기 위해 호출하는 함수입니다. 그런 " +"다음 해당 단계 동안 선택된 모든 클라이언트에 구성 사전을 전달합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-configure-clients.rst:34 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" -"위의 필수 변경 사항과 함께 방금 가능한 여러 가지 잠재적 개선 사항이 있습니다:" +"간단한 예부터 시작하겠습니다. (a) 클라이언트가 사용해야 하는 배치 크기, (b) 현재 글로벌 연합 라운드, (c) 클라이언트 " +"측에서 학습할 에포크 수를 전송하고 싶다고 가정해 보겠습니다. 구성 함수는 다음과 같습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-configure-clients.rst:47 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then empty " -"placeholder implementations of ``evaluate`` are no longer necessary." +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +":code:`on_fit_config_fn`:" msgstr "" -"``Client`` 또는 ``NumPyClient``의 서브 클래스에서 \"placeholder\" 메서드를 제" -"거합니다. 예를 들어 서버 측 평가를 사용하는 경우 ``evaluate``의 빈 자리 표시" -"자 구현은 더 이상 필요하지 않습니다." +"기본 제공 전략이 이 함수를 사용하도록 하려면 초기화 중에 매개 변수 :code:`on_fit_config_fn`을 사용하여 " +"``FedAvg``에 이 함수를 전달하면 됩니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-configure-clients.rst:56 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "클라이언트 측에서는 ``fit``으로 구성 dictionary을 받습니다:" + +#: ../../source/how-to-configure-clients.rst:67 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. 
They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -"``start_simulation``을 통해 라운드 타임아웃을 구성합니다: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "추가 도움말" +"평가를 구성하는 `on_evaluate_config_fn`도 있으며, 같은 방식으로 작동합니다. 다른 배치 크기를 사용하기 위해 " +"다른 구성 값을 `evaluate`로 보내려고 할 수 있기 때문에 이 함수는 별도의 함수입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-configure-clients.rst:69 msgid "" -"Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a " -"reference for using the Flower 1.0 API. If there are further questions, " -"`join the Flower Slack `_ and use the channel " -"``#questions``." +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -"대부분의 공식 ``Flower code 예제 `_는 이미 Flower 1.0으로 업데이트되어 있으며, Flower 1.0 API를 사용" -"하기 위한 참고 자료로 사용할 수 있습니다. 더 궁금한 점이 있다면 ``플라워 슬" -"랙 `_에 가입하여 ``#questions`` 채널을 이용하" -"세요." +"기본 제공 전략은 매 라운드마다 이 함수를 호출합니다(즉, `Strategy.configure_fit` 또는 " +"`Strategy.configure_evaluate`가 실행될 때마다). 매 라운드마다 `on_evaluate_config_fn`을" +" 호출하면 연속된 라운드에서 config dict를 변경/변경할 수 있습니다. 예를 들어 이후 라운드에서 로컬 에포크 수를 늘리기 " +"위해 하이퍼파라미터 일정을 구현하려면 다음과 같이 할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" -msgstr "Flower Next 업그레이드" +#: ../../source/how-to-configure-clients.rst:82 +msgid "The :code:`FedAvg` strategy will call this function *every round*." +msgstr ":code:`FedAvg` 전략은 이 함수를 *매 라운드마다* 호출합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-configure-clients.rst:85 +msgid "Configuring individual clients" +msgstr "개별 클라이언트 구성" + +#: ../../source/how-to-configure-clients.rst:87 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! Whether " -"you're a seasoned user or just getting started, this guide will help you " -"smoothly transition your existing setup to take advantage of the latest " -"features and improvements in Flower Next, starting from version 1.8." -msgstr "" -"Flower에서 Flower Next로의 업데이트를 위한 이동 가이드에 오신 것을 환영합니" -"다! 이 가이드는 숙련된 사용자든 이제 막 시작한 사용자든 상관없이 기존 설정을 " -"원활하게 전환하여 버전 1.8부터 Flower Next의 최신 기능 및 개선 사항을 활용할 " -"수 있도록 도와드립니다." +"In some cases, it is necessary to send different configuration values to " +"different clients." +msgstr "경우에 따라 다른 구성 값을 다른 클라이언트에 보내야 하는 경우도 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-configure-clients.rst:89 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another guide, " -"we will show how to run Flower Next end-to-end with pure Flower Next APIs." +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" -"이 가이드에서는 Flower Next의 *호환성 레이어*를 사용하여 최소한의 코드 변경으" -"로 ``1.8`` 이전의 Flower 코드를 재사용하는 방법을 보여줍니다. 다른 가이드에서" -"는 순수한 Flower Next API로 Flower Next를 end-to-end로 실행하는 방법을 보여드" -"리겠습니다." +"이는 기존 전략을 사용자 지정하거나 :doc:`implementing a custom strategy from scratch " +"`를 통해 수행할 수 있습니다. 다음은 사용자 지정 ``\"hello\"'를 " +"추가하여 :code:`FedAvg`를 사용자 지정하는 무의미한 예입니다: \"world\"`` 구성 키/값 쌍을 *단일 " +"클라이언트*의 config dict에 추가합니다(목록의 첫 번째 클라이언트만, 이 라운드의 다른 클라이언트는 이 \"특별한\" 구성" +" 값을 수신하지 않음):" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" -msgstr "자세히 알아봅시다!" +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" +msgstr "로깅 구성" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"Here's how to update an existing installation of Flower to Flower Next with " -"``pip``:" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" msgstr "" -"기존에 설치된 Flower to Flower Next를 ``pip``으로 업데이트하는 방법은 다음과 " -"같습니다:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" -msgstr "또는 시뮬레이션이 포함된 Flower Next가 필요한 경우:" +"Flower 로거는 federated 학습 워크로드에서 발생하는 모든 핵심 이벤트를 추적합니다. 기본적으로 표준 메시지 형식에 따라" +" 정보를 표시합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-configure-logging.rst:13 msgid "" -"Ensure you set the following version constraint in your ``requirements.txt``" -msgstr "``requirements.txt``에서 다음 버전 제약 조건을 설정했는지 확인하세요" +"containing relevant information including: log message level (e.g. " +":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " +"took place from, as well as the log message itself. In this way, the " +"logger would typically display information on your terminal as follows:" +msgstr "" +"로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), 타임스탬프, 로깅이 발생한 줄, 로그 메시지 자체 등 " +"관련 정보를 포함합니다. 이러한 방식으로 로거는 일반적으로 다음과 같은 정보를 터미널에 표시합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" -msgstr "또는 ``pyproject.toml``:" +#: ../../source/how-to-configure-logging.rst:34 +msgid "Saving log to file" +msgstr "파일에 로그 저장" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -msgid "Using Poetry" -msgstr "Poetry 사용" +#: ../../source/how-to-configure-logging.rst:36 +msgid "" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do :code:`fl.server.start_server`) and when " +"using the :code:`VirtualClientEngine` (i.e. when you do " +":code:`fl.simulation.start_simulation`). In some situations you might " +"want to save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" +msgstr "" +"기본적으로 Flower 로그는 Federated 학습 워크로드를 실행하는 터미널에 출력됩니다. 이는 gRPC 기반 " +"페더레이션(즉,:code:`fl.simulation.start_simulation`를 실행하는 경우)과 " +":code:`VirtualClientEngine`을 사용하는 경우(즉, " +":코드:`fl.simulation.start_simulation`을 실행하는 경우) 모두에 적용됩니다. 경우에 따라 이 로그를 " +"디스크에 저장하고 싶을 수도 있습니다. 
이 경우 `fl.common.logger.configure() " +"`_" +" 함수를 호출하여 저장할 수 있습니다. 예를 들어:" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-configure-logging.rst:53 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"With the above, Flower will record the log you see on your terminal to " +":code:`log.txt`. This file will be created in the same directory as were " +"you are running the code from. If we inspect we see the log above is also" +" recorded but prefixing with :code:`identifier` each line:" msgstr "" -"``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음 다시 설치하세요(``" -"poetry install``을 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 " -"삭제하는 것을 잊지 마세요)." +"위와 같이 하면 Flower는 터미널에 표시되는 로그를 :code:`log.txt`에 기록합니다. 이 파일은 코드를 실행한 " +"디렉터리와 동일한 디렉터리에 생성됩니다. 검사해보면 위의 로그도 기록되지만 각 줄 앞에 :code:`identifier` 접두사가 " +"붙는 것을 확인할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 -msgid "" -"Ensure you set the following version constraint in your ``pyproject.toml``:" -msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" +#: ../../source/how-to-configure-logging.rst:74 +msgid "Log your own messages" +msgstr "나만의 메시지 기록" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-configure-logging.rst:76 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, you " -"create a |clientapp_link|_ and start it via the command line. Instead of " -"starting a server in code via ``start_server()``, you create a |" -"serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you to " -"run your project both in the traditional way and in the Flower Next way:" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" -"Flower Next에서는 *infrastructure*와 *application layers*가 분리되었습니다. " -"코드에서 ``start_client()``를 통해 클라이언트를 시작하는 대신, 명령줄을 통해 " -"|clientapp_link|_를 생성하여 시작합니다. 코드에서 ``start_server()``를 통해 " -"서버를 시작하는 대신 |serverapp_link|_를 생성하고 명령줄을 통해 서버를 시작합" -"니다. 서버와 클라이언트의 장기 실행 컴포넌트를 SuperLink와 SuperNode라고 합니" -"다. 수동 업데이트가 필요하지 않고 기존 방식과 Flower Next 방식 모두에서 프로" -"젝트를 실행할 수 있는 non-breaking 변경 사항은 다음과 같습니다:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -msgid "|clientapp_link|_" -msgstr "|clientapp_link|_" +"애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에 기본적으로 표시되는 정보를 확장할 수 있습니다. 다음과 같이 쉽게 " +"추가할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-configure-logging.rst:102 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it via " -"|startclient_link|_. Here's an example:" -msgstr "" -"|clientapp_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 " -"래핑하세요. 다음은 예시입니다:" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." +msgstr "이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메시지가 표시됩니다." 
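A brief sketch of how the logging pieces described above could fit together in a single client script follows. It assumes the `fl.common.logger.configure()` call and the `log()` helper referenced in this guide; the identifier and file name are arbitrary examples:

```python
from logging import INFO

import flwr as fl
from flwr.common.logger import log

# Mirror everything the Flower logger prints to a local file, prefixing each
# line with the chosen identifier (as described above).
fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt")

# Application-level messages go through the same logger and end up both on
# the terminal and in log.txt.
log(INFO, "Client startup complete, batch_size=%s", 32)
```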
-#: ../../source/how-to-upgrade-to-flower-next.rst:132 -msgid "|serverapp_link|_" -msgstr "|serverapp_link|_" +#: ../../source/how-to-configure-logging.rst:128 +msgid "Log to a remote service" +msgstr "원격 서비스에 로그인" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-configure-logging.rst:130 msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting the " -"server via |startserver_link|_. Here's an example:" -msgstr "" -"서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략" -"을 |serverapp_link|_로 래핑하세요. 다음은 예시입니다:" +"The :code:`fl.common.logger.configure` function, also allows specifying a" +" host to which logs can be pushed (via :code:`POST`) through a native " +"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" +" feature in :code:`gRPC`-based Federated Learning workloads where " +"otherwise gathering logs from all entities (i.e. the server and the " +"clients) might be cumbersome. Note that in Flower simulation, the server " +"automatically displays all logs. You can still specify a " +":code:`HTTPHandler` should you wish to backup or analyze the logs " +"somewhere else." +msgstr "" +"또한 :code:`fl.common.logger.configure` 함수를 사용하면 네이티브 Python " +":code:`logging.handler.HTTPHandler`를 통해 로그를 푸시할 수 있는 호스트를 지정할 수 " +"있습니다(:code:`POST`를 통해). 이는 모든 엔티티(예: 서버 및 클라이언트)에서 로그를 수집하는 것이 번거로울 수 있는 " +":code:`gRPC` 기반 Federated 학습 워크로드에서 특히 유용한 기능입니다. Flower 시뮬레이션에서는 서버가 모든 " +"로그를 자동으로 표시합니다. 로그를 다른 곳에 백업하거나 분석하려는 경우 :code:`HTTPHandler`를 지정할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" -msgstr "배포" +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" +msgstr "SSL 연결 사용" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-enable-ssl-connections.rst:4 msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in " -"sequence, |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|" -"_. There is no need to execute `client.py` and `server.py` as Python scripts." +"This guide describes how to a SSL-enabled secure Flower server " +"(:code:`SuperLink`) can be started and how a Flower client " +"(:code:`SuperNode`) can establish a secure connections to it." msgstr "" -"실행하기 전에 |flowernext_superlink_link|_를 사용하여 ``SuperLink``를 실행한 " -"후 |flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 " -"순서대로 실행합니다. 'client.py'와 'server.py'를 Python 스크립트로 실행할 " -"필요는 없습니다." +"이 가이드에서는 SSL을 지원하는 보안 Flower 서버(:코드:`SuperLink`)를 시작하는 방법과 Flower " +"클라이언트(:코드:`SuperNode`)가 이 서버에 보안 연결을 설정하는 방법을 설명합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-enable-ssl-connections.rst:7 msgid "" -"Here's an example to start the server without HTTPS (only for prototyping):" +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" -"다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):" +"보안 연결을 보여주는 전체 코드 예제는 '여기 " +"`_'에서 확인할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-enable-ssl-connections.rst:10 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, " -"``--ssl-certfile``, and ``--ssl-keyfile`` command line options to pass paths " -"to (CA certificate, server certificate, and server private key)." +"The code example comes with a :code:`README.md` file which explains how " +"to start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. 
Stick to this guide for a deeper " +"introduction to the topic." msgstr "" -"다음은 HTTPS로 시작하는 또 다른 예제입니다. '`--ssl-ca-certfile``, '`--ssl-" -"certfile``, '`--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 " -"및 서버 개인 키)의 경로를 전달합니다." +"코드 예제에는 시작 방법을 설명하는 :code:`README.md` 파일이 함께 제공됩니다. 이미 SSL을 사용하도록 설정되어 " +"있지만 그 방법에 대한 설명이 부족할 수 있습니다. 이 가이드를 참고하여 이 주제에 대해 자세히 알아보세요." -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -msgid "Simulation in CLI" -msgstr "CLI 시뮬레이션" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "인증서" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-enable-ssl-connections.rst:18 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and |" -"serverapp_link|_, respectively. There is no need to use |startsim_link|_ " -"anymore. Here's an example:" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in :code:`examples/advanced-" +"tensorflow/certificates/generate.sh` with the following command sequence:" msgstr "" -"기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하" -"세요. 더 이상 |startsim_link|_를 사용할 필요가 없습니다. 다음은 예시입니다:" +"SSL 사용 연결을 사용하려면 서버와 클라이언트에 인증서를 전달해야 합니다. 이 가이드에서는 자체 서명된 인증서를 생성하겠습니다. " +"이 과정은 상당히 복잡할 수 있으므로 다음 명령 시퀀스를 사용하여 :code:`examples/advanced-" +"tensorflow/certificates/generate.sh`에서 스크립트를 실행하도록 요청하겠습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-enable-ssl-connections.rst:29 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script. " -"Here's an example (assuming the ``server_app`` and ``client_app`` objects " -"are in a ``sim.py`` module):" +"This will generate the certificates in :code:`examples/advanced-" +"tensorflow/.cache/certificates`." msgstr "" -"CLI에서 |flower_simulation_link|_를 실행하고 Python 스크립트를 실행하는 대신 " -"코드에서 ``server_app`` / ``client_app`` 개체를 가리키세요. 다음은 예제입니다" -"(``server_app`` 및 ``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):" +"이렇게 하면 :code:`examples/advanced-tensorflow/.cache/certificates`에 인증서가 " +"생성됩니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-enable-ssl-connections.rst:31 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the ``client_resources`` " -"argument in |startsim_link|_. Here's an example:" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" -"|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-" -"config`` 명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설" -"정하세요. 다음은 예시입니다:" +"이 예의 맥락에서 SSL 인증서를 생성하는 접근 방식은 영감과 출발점이 될 수 있지만 프로덕션 환경에 대한 참조로 사용해서는 안 " +"됩니다. 프로덕션 환경용 인증서를 올바르게 생성하는 문제에 대해서는 다른 출처를 참조하세요. 중요하지 않은 프로토타이핑 또는 연구 " +"프로젝트의 경우, 이 가이드에 언급된 스크립트를 사용하여 생성한 자체 서명 인증서를 사용하는 것으로 충분할 수 있습니다." 
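As an illustrative sketch only: after running the `generate.sh` script mentioned above, the generated files could be loaded as bytes like this. The file names are taken from that example, and the parameters named in the comments belong to the legacy `start_server`/`start_client` Python APIs; treat both as assumptions to verify against your Flower version (the SuperLink/SuperNode commands shown in this guide take the same files as path flags instead):

```python
from pathlib import Path

# Location used by the generate.sh script mentioned above (adjust as needed).
certs_dir = Path("examples/advanced-tensorflow/.cache/certificates")

# Server side: CA certificate, server certificate, and server private key.
server_certificates = (
    (certs_dir / "ca.crt").read_bytes(),
    (certs_dir / "server.pem").read_bytes(),
    (certs_dir / "server.key").read_bytes(),
)

# Client side: the PEM-encoded root (CA) certificate used to verify the server.
root_certificates = (certs_dir / "ca.crt").read_bytes()

# These values would then be passed on, e.g. via the legacy Python APIs:
#   fl.server.start_server(..., certificates=server_certificates)
#   fl.client.start_client(..., root_certificates=root_certificates)
```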
-#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" -msgstr "Notebook에서 시뮬레이션" +#: ../../source/how-to-enable-ssl-connections.rst:39 +msgid "Server (SuperLink)" +msgstr "서버(SuperLink)" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-enable-ssl-connections.rst:41 msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an " -"example:" -msgstr "" -"notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 다음은 예시입" -"니다:" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" +msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 서버(SuperLink)를 시작합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-enable-ssl-connections.rst:50 msgid "" -"Some official `Flower code examples `_ are " -"already updated to Flower Next so they can serve as a reference for using " -"the Flower Next API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``. You " -"can also `participate in Flower Discuss `_ where " -"you can find us answering questions, or share and learn from others about " -"migrating to Flower Next." -msgstr "" -"일부 공식 ``Flower 코드 예제 `_는 이미 플라" -"워 넥스트에 업데이트되어 있으므로 플라워 넥스트 API를 사용하는 데 참고할 수 " -"있습니다. 더 궁금한 점이 있다면 ``플라워 슬랙 `_에 가입하고 ``#questions`` 채널을 이용하세요. 또한, ``Flower Discuss " -"`_에 참여하여 질문에 대한 답변을 확인하거나 다른 " -"사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다." +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." +msgstr "인증서를 제공할 때 서버는 세 가지 인증서 경로의 튜플을 기대합니다: CA 인증서, 서버 인증서 및 서버 개인 키입니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -msgid "Important" -msgstr "중요" +#: ../../source/how-to-enable-ssl-connections.rst:54 +msgid "Client (SuperNode)" +msgstr "클라이언트(SuperNode)" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-enable-ssl-connections.rst:56 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" -msgstr "" -"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으" -"로 업데이트될 예정입니다. 피드백이 있으면 언제든지 공유해 주세요!" - -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" -msgstr "행복한 마이그레이션! 🚀" - -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" -msgstr "기본 제공 모드 사용" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" +msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트(SuperNode)를 시작합니다:" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-enable-ssl-connections.rst:64 msgid "" -"**Note: This tutorial covers experimental features. The functionality and " -"interfaces may change in future versions.**" -msgstr "" -"**참고: 이 튜토리얼은 실험적인 기능을 다룹니다. 기능 및 인터페이스는 향후 버" -"전에서 변경될 수 있습니다.**" +"When setting :code:`root_certificates`, the client expects a file path to" +" PEM-encoded root certificates." +msgstr "코드:`root_certificates`를 설정하면 클라이언트는 PEM 인코딩된 루트 인증서의 파일 경로를 예상합니다." -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-enable-ssl-connections.rst:70 msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment the " -"behavior of a ``ClientApp``. 
Mods (sometimes also called Modifiers) allow us " -"to perform operations before and after a task is processed in the " -"``ClientApp``." +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." msgstr "" -"이 튜토리얼에서는 내장 모드를 활용하여 ``ClientApp``의 동작을 보강하는 방법" -"을 배우겠습니다. Mods(Modifiers라고도 함)를 사용하면 ``ClientApp``에서 작업" -"이 처리되기 전과 후에 작업을 수행할 수 있습니다." +"이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 시작하고, 클라이언트가 보안 연결을 설정하는 " +"방법을 배웠을 것입니다." -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" -msgstr "Mods란 무엇인가요?" +#: ../../source/how-to-enable-ssl-connections.rst:75 +msgid "Additional resources" +msgstr "추가 리소스" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-enable-ssl-connections.rst:77 msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or " -"inspect the incoming ``Message`` and the resulting outgoing ``Message``. The " -"signature for a ``Mod`` is as follows:" -msgstr "" -"Mod는 ``ClientApp``을 감싸는 콜러블입니다. 들어오는 ``Message``와 그 결과로 " -"나가는 ``Message``를 조작하거나 검사할 수 있습니다. ``Mod``의 시그니처는 다음" -"과 같습니다:" - -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" -msgstr "일반적인 mod 함수는 다음과 같은 모습일 수 있습니다:" - -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" -msgstr "Mods 사용" - -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" -msgstr "``ClientApp``에서 mods를 사용하려면 다음 단계를 따르세요:" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" +msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. Import the required mods" -msgstr "1. 필요한 mods를 가져옵니다" +#: ../../source/how-to-enable-ssl-connections.rst:79 +msgid "`Let's Encrypt `_" +msgstr "'암호화하세요 `_'" -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" -msgstr "먼저 사용하려는 기본 제공 mod를 가져옵니다:" +#: ../../source/how-to-enable-ssl-connections.rst:80 +msgid "`certbot `_" +msgstr "`인증봇 `_" -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" -msgstr "2. 클라이언트 기능 정의" +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" +msgstr "전략 구현" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "mod(s)로 래핑할 클라이언트 함수('``client_fn``)를 정의합니다:" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." +msgstr "" +"전략 추상화를 통해 완전한 맞춤형 전략을 구현할 수 있습니다. 전략은 기본적으로 서버에서 실행되는 연합 학습 알고리즘입니다. 전략은" +" 클라이언트를 샘플링하는 방법, 학습을 위해 클라이언트를 구성하는 방법, 업데이트를 집계하는 방법, 모델을 평가하는 방법을 " +"결정합니다. Flower는 아래에 설명된 것과 동일한 API를 기반으로 하는 몇 가지 기본 제공 전략을 제공합니다." -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. Create the ``ClientApp`` with mods" -msgstr "3. 
mods로 ``ClientApp``을 생성합니다" +#: ../../source/how-to-implement-strategies.rst:11 +msgid "The :code:`Strategy` abstraction" +msgstr ":code:`Strategy` 추상화" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-implement-strategies.rst:13 msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" +"All strategy implementation are derived from the abstract base class " +":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" -"``ClientApp``을 생성하고 mods를 ``mods`` argument에 목록으로 전달합니다. mods" -"를 제공하는 순서가 중요합니다:" - -#: ../../source/how-to-use-built-in-mods.rst:72 -msgid "Order of execution" -msgstr "실행 순서" +"모든 전략 구현은 기본 제공 구현과 타사 구현 모두 추상 기본 클래스인 " +":code:`flwr.server.strategy.Strategy`에서 파생됩니다. 즉, 사용자 정의 전략 구현은 기본 제공 구현과" +" 완전히 동일한 기능을 사용할 수 있습니다." -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "``ClientApp``이 실행되면 목록에 제공된 순서대로 모드가 실행됩니다:" - -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" -msgstr "``example_mod_1``(가장 바깥쪽 mod)" - -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" -msgstr "``example_mod_2`` (다음 mod)" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" +msgstr "전략 추상화에서는 구현해야 하는 몇 가지 추상적인 메서드를 정의합니다:" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-implement-strategies.rst:75 msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" +"Creating a new strategy means implementing a new :code:`class` (derived " +"from the abstract base class :code:`Strategy`) that implements for the " +"previously shown abstract methods:" msgstr "" -"Message handler(들어오는 ``Message``를 처리하고 나가는 ``Message``를 반환하" -"는 핵심 함수)" +"새 전략을 생성한다는 것은 이전에 표시된 추상 메서드에 대해 구현하는 새로운 :code:`class`(추상 기본 클래스 " +":code:`Strategy`에서 파생됨)를 구현하는 것을 의미합니다:" -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" -msgstr "``example_mod_2``(돌아가는 방법)" +#: ../../source/how-to-implement-strategies.rst:100 +msgid "The Flower server calls these methods in the following order:" +msgstr "Flower 서버는 다음 순서로 이러한 메서드를 호출합니다:" -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "``example_mod_1``(돌아가는 방법에 가장 바깥쪽 모드)" +#: ../../source/how-to-implement-strategies.rst:177 +msgid "The following sections describe each of those methods in more detail." +msgstr "다음 섹션에서는 이러한 각 방법에 대해 자세히 설명합니다." -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-implement-strategies.rst:180 +msgid "The :code:`initialize_parameters` method" +msgstr ":code:`initialize_parameters` 메서드" + +#: ../../source/how-to-implement-strategies.rst:182 msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` before " -"passing it to the next mod, and likewise with the outgoing ``Message`` " -"before returning it up the stack." +":code:`initialize_parameters` is called only once, at the very beginning " +"of an execution. 
It is responsible for providing the initial global model" +" parameters in a serialized form (i.e., as a :code:`Parameters` object)." msgstr "" -"각 mod는 다음 mod로 전달하기 전에 들어오는 ``Message``를 검사하고 수정할 기회" -"가 있으며, 스택 위로 반환하기 전에 나가는 ``Message``도 마찬가지로 검사하고 " -"수정할 수 있습니다." +"code:`initialize_parameters`는 실행을 처음 시작할 때 한 번만 호출됩니다. 이 함수는 초기 전역 모델 " +"파라미터를 직렬화된 형식(즉, :code:`Parameters` 객체)으로 제공하는 역할을 합니다." -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-implement-strategies.rst:184 msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of mods " -"is crucial and affects how the input and output are processed." +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +":code:`FedAvg`:" msgstr "" -"이 가이드를 따라 mods를 효과적으로 사용하여 ``ClientApp``의 기능을 향상시키" -"는 방법을 배웠습니다. mods 순서는 매우 중요하며 입력과 출력이 처리되는 방식" -"에 영향을 미친다는 점을 기억하세요." - -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" -msgstr "Mods를 통해 더욱 강력하고 유연한 ``ClientApp``을 구축해 보세요!" +"기본 제공 전략은 사용자가 제공한 초기 매개 변수를 반환합니다. 다음 예는 초기 매개 변수를 :code:`FedAvg`에 전달하는 " +"방법을 보여줍니다:" -#: ../../source/how-to-use-differential-privacy.rst:2 -msgid "Use Differential Privacy" -msgstr "차분 개인정보 보호 사용" - -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-implement-strategies.rst:209 msgid "" -"This guide explains how you can utilize differential privacy in the Flower " -"framework. If you are not yet familiar with differential privacy, you can " -"refer to :doc:`explanation-differential-privacy`." -msgstr "" -"이 가이드에서는 Flower 프레임워크에서 차분 개인정보 보호 기능을 활용하는 방법" -"을 설명합니다. 차분 개인정보 보호에 대해 아직 익숙하지 않은 경우 :doc:" -"`explanation-differential-privacy`를 참조하세요." +"The Flower server will call :code:`initialize_parameters`, which either " +"returns the parameters that were passed to :code:`initial_parameters`, or" +" :code:`None`. If no parameters are returned from " +":code:`initialize_parameters` (i.e., :code:`None`), the server will " +"randomly select one client and ask it to provide its parameters. This is " +"a convenience feature and not recommended in practice, but it can be " +"useful for prototyping. In practice, it is recommended to always use " +"server-side parameter initialization." +msgstr "" +"Flower 서버는 :code:`initialize_parameters`를 호출하여 " +":code:`initial_parameters`에 전달된 파라미터를 반환하거나 :code:`None`을 반환합니다. " +":code:`initial_parameters`에서 반환되는 매개변수가 없는 경우(즉, :code:`None`) 서버는 무작위로 " +"클라이언트 하나를 선택하여 해당 클라이언트에 매개변수를 제공하도록 요청합니다. 이는 편의 기능이며 실제로는 권장하지 않지만 " +"프로토타이핑에는 유용할 수 있습니다. 실제로는 항상 서버 측 매개변수 초기화를 사용하는 것이 좋습니다." -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-implement-strategies.rst:213 msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free " -"contact us to discuss your requirements and to receive guidance on how to " -"best use these features." +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" -"Flower의 차분 개인정보 보호는 현재 프리뷰 단계에 있습니다. 
민감한 데이터가 있" -"는 프로덕션 환경에서 이러한 기능을 사용할 계획이라면 언제든지 문의하여 요구 " -"사항을 논의하고 이러한 기능을 가장 잘 사용하는 방법에 대한 안내를 받으세요." +"서버 측 파라미터 초기화는 강력한 메커니즘입니다. 예를 들어 이전에 저장한 체크포인트에서 학습을 재개하는 데 사용할 수 있습니다. " +"또한 연합 학습을 사용하여 사전 학습된 모델을 미세 조정하는 등 하이브리드 접근 방식을 구현하는 데 필요한 기본 기능입니다." -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-implement-strategies.rst:216 +msgid "The :code:`configure_fit` method" +msgstr ":code:`configure_fit` 메서드" + +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"This approach consists of two seprate phases: clipping of the updates and " -"adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on the " -"server side or the client side." +":code:`configure_fit` is responsible for configuring the upcoming round " +"of training. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of :code:`configure_fit` makes this clear:" msgstr "" -"이 접근 방식은 업데이트 클리핑과 집계된 모델에 노이즈 추가라는 두 가지 단계" -"로 구성됩니다. 클리핑 단계의 경우, Flower 프레임워크는 클리핑을 서버 측에서 " -"수행할지 클라이언트 측에서 수행할지 결정할 수 있도록 했습니다." +":code:`configure_fit`은 다가오는 학 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 의미하나요? " +"라운드를 구성한다는 것은 클라이언트를 선택하고 이 클라이언트에게 어떤 지침을 보낼지 결정하는 것을 의미합니다. " +"code:`configure_fit`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-implement-strategies.rst:231 msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to the " -"need to perform the clipping operation for all clients." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in :code:`configure_fit`:" msgstr "" -"**Server-side Clipping**: 이 방식은 서버가 모든 클라이언트의 업데이트에 대해 " -"균일한 클리핑을 적용하고 클리핑 값에 대한 통신 오버헤드를 줄일 수 있다는 장점" -"이 있습니다. 하지만 모든 클라이언트에 대해 클리핑 작업을 수행해야 하기 때문" -"에 서버의 계산 부하가 증가한다는 단점도 있습니다." +"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 전략 구현은 일반적으로 " +":code:`configure_fit`에서 다음 단계를 수행합니다:" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-implement-strategies.rst:233 +#: ../../source/how-to-implement-strategies.rst:280 msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the " -"computational overhead on the server. However, it also has the disadvantage " -"of lacking centralized control, as the server has less control over the " -"clipping process." +"Use the :code:`client_manager` to randomly sample all (or a subset of) " +"available clients (each represented as a :code:`ClientProxy` object)" msgstr "" -"**Client-side Clipping**: 이 방식은 서버의 계산 오버헤드를 줄일 수 있다는 장" -"점이 있습니다. 하지만 서버가 클리핑 프로세스에 대한 통제력이 떨어지기 때문에 " -"centralized 제어가 부족하다는 단점도 있습니다." 
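A rough sketch of the :code:`initial_parameters` passage above, showing how serialized initial parameters can be handed to :code:`FedAvg`; the zero-filled arrays are stand-ins for real model weights.

.. code-block:: python

    import numpy as np

    import flwr as fl

    # Stand-in for real model weights (e.g., extracted from a Keras or PyTorch model)
    initial_ndarrays = [
        np.zeros((10, 10), dtype=np.float32),
        np.zeros(10, dtype=np.float32),
    ]

    strategy = fl.server.strategy.FedAvg(
        initial_parameters=fl.common.ndarrays_to_parameters(initial_ndarrays),
    )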
- -#: ../../source/how-to-use-differential-privacy.rst:21 -msgid "Server-side Clipping" -msgstr "서버 측 클리핑" +":code:`client_manager`를 사용하여 사용 가능한 모든 클라이언트(또는 그 하위 집합)를 무작위로 샘플링합니다(각각 " +":code:`ClientProxy` 개체로 표시됨)" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-implement-strategies.rst:234 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are :code:" -"`DifferentialPrivacyServerSideFixedClipping` and :code:" -"`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and adaptive " -"clipping." +"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " +"current global model :code:`parameters` and :code:`config` dict" msgstr "" -"서버 측 클리핑이 있는 중앙 DP의 경우, 실제 :code:`Strategy` 인스턴스를 감싸" -"는 래퍼 역할을 하는 두 개의 :code:`Strategy` 클래스가 있습니다(예: :code:" -"`FedAvg`). 두 개의 래퍼 클래스는 고정 및 적응형 클리핑을 위한 :code:" -"`DifferentialPrivacyServerSideFixedClipping`과 :code:" -"`DifferentialPrivacyServerSideAdaptiveClipping`입니다." +"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " +"dict를 보유한 동일한 :code:`FitIns`와 쌍을 이룹니다" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "server side clipping" -msgstr "서버 측 클리핑" - -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-implement-strategies.rst:236 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-side " -"fixed clipping using the :code:`DifferentialPrivacyServerSideFixedClipping` " -"wrapper class. The same approach can be used with :code:" -"`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"More sophisticated implementations can use :code:`configure_fit` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_fit`." msgstr "" -"아래 코드 샘플은 :code:`FedAvg` 전략이 :code:" -"`DifferentialPrivacyServerSideFixedClipping` 래퍼 클래스를 사용하여 서버 측 " -"고정 클리핑을 사용할 수 있도록 합니다. 해당 입력 매개변수를 조정하여 :code:" -"`DifferentialPrivacyServerSideAdaptiveClipping`과 동일한 접근 방식을 사용할 " -"수 있습니다." - -#: ../../source/how-to-use-differential-privacy.rst:52 -msgid "Client-side Clipping" -msgstr "클라이언트 측 클리핑" +"보다 정교한 구현은 :code:`configure_fit`을 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 있습니다. " +"클라이언트는 :code:`configure_fit`에서 반환된 목록에 해당 :code:`ClientProxy`가 포함된 경우에만 " +"라운드에 참여합니다." -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-implement-strategies.rst:240 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower :" -"code:`Mods` to perform the clipping. Two mods are available for fixed and " -"adaptive client-side clipping: :code:`fixedclipping_mod` and :code:" -"`adaptiveclipping_mod` with corresponding server-side wrappers :code:" -"`DifferentialPrivacyClientSideFixedClipping` and :code:" -"`DifferentialPrivacyClientSideAdaptiveClipping`." -msgstr "" -"클라이언트 측 클리핑이 있는 중앙 DP의 경우 서버는 각 라운드마다 선택한 클라이" -"언트에 클리핑 값을 보냅니다. 클라이언트는 기존 Flower :code:`Mods`를 사용하" -"여 클리핑을 수행할 수 있습니다. 
고정 및 적응형 클라이언트 측 클리핑에는 두 가" -"지 모드를 사용할 수 있습니다: :code:`fixedclipping_mod` 및 :code:" -"`adaptiveclipping_mod`와 해당 서버 측 래퍼 :code:" -"`DifferentialPrivacyClientSideFixedClipping` 및 :code:" -"`DifferentialPrivacyClientSideAdaptiveClipping`이 있습니다." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." +msgstr "" +"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. instructions은 클라이언트별로 정의되므로 각 클라이언트에 서로 " +"다른 명령어를 전송할 수 있습니다. 이를 통해 예를 들어 클라이언트마다 다른 모델을 학습시키거나 클라이언트마다 다른 하이퍼파라미터를" +" 사용하는 사용자 지정 전략을 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "client side clipping" -msgstr "클라이언트 측 클리핑" +#: ../../source/how-to-implement-strategies.rst:243 +msgid "The :code:`aggregate_fit` method" +msgstr ":code:`aggregate_fit` 메서드" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-implement-strategies.rst:245 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the :code:" -"`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on the " -"client, :code:`fixedclipping_mod`:" +":code:`aggregate_fit` is responsible for aggregating the results returned" +" by the clients that were selected and asked to train in " +":code:`configure_fit`." msgstr "" -"아래 코드 샘플은 :code:`FedAvg` 전략이 클라이언트 측 고정 클리핑과 함께 차분 " -"프라이버시를 사용할 수 있도록 :code:" -"`DifferentialPrivacyClientSideFixedClipping` 래퍼 클래스와 클라이언트에서 :" -"code:`fixedclipping_mod`를 모두 사용하도록 합니다:" +"code:`aggregate_fit`은 :code:`configure_fit`에서 훈련하도록 선택되고 요청된 클라이언트가 반환한 " +"결과를 집계하는 역할을 담당합니다." -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-implement-strategies.rst:258 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` needs " -"to configure the matching :code:`fixedclipping_mod` to perform the client-" -"side clipping:" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " +"of :code:`results`, but also a list of :code:`failures`." msgstr "" -"서버 측 전략 래퍼 외에도 클라이언트 측 클리핑을 수행하려면 :code:`ClientApp`" -"이 일치하는 :code:`fixedclipping_mod`를 구성해야 합니다:" +"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " +"없습니다(:code:`configure_fit`을 통해). 따라서 :code:`aggregate_fit`은 " +":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-implement-strategies.rst:260 msgid "" -"To utilize local differential privacy (DP) and add noise to the client model " -"parameters before transmitting them to the server in Flower, you can use the " -"`LocalDpMod`. The following hyperparameters need to be set: clipping norm " -"value, sensitivity, epsilon, and delta." +":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" +" dictionary of aggregated metrics. The :code:`Parameters` return value is" +" optional because :code:`aggregate_fit` might decide that the results " +"provided are not sufficient for aggregation (e.g., too many failures)." 
msgstr "" -"로컬 차분 프라이버시(DP)를 활용하고 클라이언트 모델 파라미터를 서버로 전송하" -"기 전에 노이즈를 추가하려면 `LocalDpMod`를 사용하면 됩니다. 클리핑 노멀 값, " -"감도, 엡실론, 델타 등의 하이퍼파라미터를 설정해야 합니다." - -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" -msgstr "로컬 DP mod" +"code:`aggregate_fit`은 선택적 :code:`Parameters` 개체와 집계된 메트릭의 dictionary를 " +"반환합니다. :code:`Parameters` 반환 값은 :code:`aggregate_fit`이 제공된 결과가 집계에 충분하지 " +"않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" -msgstr "다음은 :code:`LocalDpMod`를 사용하는 방법을 보여주는 코드 예시입니다:" +#: ../../source/how-to-implement-strategies.rst:263 +msgid "The :code:`configure_evaluate` method" +msgstr ":code:`configure_evaluate` 메서드" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-implement-strategies.rst:265 msgid "" -"Please note that the order of mods, especially those that modify parameters, " -"is important when using multiple modifiers. Typically, differential privacy " -"(DP) modifiers should be the last to operate on parameters." +":code:`configure_evaluate` is responsible for configuring the upcoming " +"round of evaluation. What does *configure* mean in this context? " +"Configuring a round means selecting clients and deciding what " +"instructions to send to these clients. The signature of " +":code:`configure_evaluate` makes this clear:" msgstr "" -"여러 개의 수정자를 사용할 때는 수정자, 특히 매개변수를 수정하는 수정자의 순서" -"가 중요하다는 점에 유의하세요. 일반적으로 차분 프라이버시(DP) 수정자는 매개변" -"수에서 가장 마지막에 작동해야 합니다." +":code:`configure_evaluate`는 다가오는 평가 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 " +"의미하나요? 라운드를 구성한다는 것은 클라이언트를 선택하고 이러한 클라이언트에 전송할 지침을 결정하는 것을 의미합니다. " +":code:`configure_evaluate`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" -msgstr "Privacy Engines을 사용한 로컬 훈련" +#: ../../source/how-to-implement-strategies.rst:278 +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in :code:`configure_evaluate`:" +msgstr "" +"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 전략 구현은 일반적으로 " +":code:`configure_evaluate`에서 다음 단계를 수행합니다:" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-implement-strategies.rst:281 msgid "" -"For ensuring data instance-level privacy during local model training on the " -"client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, please " -"refer to the Flower examples directory (`Opacus `_, `Tensorflow Privacy `_)." +"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " +"the current global model :code:`parameters` and :code:`config` dict" msgstr "" -"클라이언트 측에서 로컬 모델을 훈련하는 동안 데이터 인스턴스 수준의 개인 정보 " -"보호를 보장하려면 Opacus 및 TensorFlow Privacy와 같은 개인 정보 보호 엔진을 " -"활용하는 것을 고려하세요. 이러한 엔진과 함께 Flower를 사용하는 예제는 Flower " -"examples directory (`Opacus `_, `Tensorflow Privacy `_)를 참조하세요." +"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " +"dict를 보유한 동일한 :code:`EvaluateIns`와 쌍을 이룹니다" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "전략 사용하기" - -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-implement-strategies.rst:283 msgid "" -"Flower allows full customization of the learning process through the :code:" -"`Strategy` abstraction. 
A number of built-in strategies are provided in the " -"core framework." +"More sophisticated implementations can use :code:`configure_evaluate` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_evaluate`." msgstr "" -"Flower는 :code:`Strategy` abstraction를 통해 학습 과정을 완전히 사용자 정의" -"할 수 있습니다. 핵심 프레임워크에는 여러 가지 기본 제공 전략이 제공됩니다." +"보다 정교한 구현은 :code:`configure_evaluate`를 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 " +"있습니다. 클라이언트는 :code:`configure_evaluate`에서 반환된 목록에 해당 :code:`ClientProxy`가" +" 포함된 경우에만 라운드에 참여합니다." -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-implement-strategies.rst:287 msgid "" -"There are three ways to customize the way Flower orchestrates the learning " -"process on the server side:" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." msgstr "" -"서버 측에서 Flower가 학습 과정을 조율하는 방식을 사용자 지정하는 방법에는 세 " -"가지가 있습니다:" - -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "기존 전략(예: :code:`FedAvg`)을 사용합니다" - -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "콜백 함수로 기존 전략 사용자 지정" - -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "새로운 전략 구현" +"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. 명령어는 클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 " +"전송할 수 있습니다. 이를 통해 사용자 지정 전략을 통해 예를 들어 클라이언트마다 다른 모델을 평가하거나 클라이언트마다 다른 " +"하이퍼파라미터를 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "기존 전략 사용" +#: ../../source/how-to-implement-strategies.rst:291 +msgid "The :code:`aggregate_evaluate` method" +msgstr ":code:`aggregate_evaluate` 메서드" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-implement-strategies.rst:293 msgid "" -"Flower comes with a number of popular federated learning strategies built-" -"in. A built-in strategy can be instantiated as follows:" +":code:`aggregate_evaluate` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +":code:`configure_evaluate`." msgstr "" -"Flower에는 여러 가지 인기 있는 연합 학습 전략이 기본으로 제공됩니다. 기본 " -"제공 전략은 다음과 같이 인스턴스화할 수 있습니다:" +"code:`aggregate_evaluate`는 :code:`configure_evaluate`에서 선택되어 평가를 요청한 " +"클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-implement-strategies.rst:306 msgid "" -"This creates a strategy with all parameters left at their default values and " -"passes it to the :code:`start_server` function. It is usually recommended to " -"adjust a few parameters during instantiation:" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " +"receives a list of :code:`results`, but also a list of :code:`failures`." 
msgstr "" -"이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 생성되어 :code:" -"`start_server` 함수에 전달됩니다. 일반적으로 인스턴스화 중에 몇 가지 매개변수" -"를 조정하는 것이 좋습니다:" +"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " +"없습니다(:code:`configure_evaluate`를 통해). 따라서 :code:`aggregate_evaluate`는 " +":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-implement-strategies.rst:308 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" +" dictionary of aggregated metrics. The :code:`float` return value is " +"optional because :code:`aggregate_evaluate` might decide that the results" +" provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" -"기존 전략은 동작을 사용자 지정하는 여러 가지 방법을 제공합니다. 콜백 함수를 " -"사용하면 전략이 실행 중에 사용자가 제공한 코드를 호출할 수 있습니다." - -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "클라이언트 적합성 및 클라이언트 평가 구성" +"code:`aggregate_evaluate`는 선택적 :code:`float`(손실)와 집계된 메트릭의 dictionary를 " +"반환합니다. code:`float` 반환 값은 :code:`aggregate_evaluate`가 제공된 결과가 집계에 충분하지 " +"않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." -#: ../../source/how-to-use-strategies.rst:47 -msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function will " -"be called by the strategy and must return a dictionary of configuration key " -"values pairs that will be sent to the client. It must return a dictionary of " -"arbitrary configuration values :code:`client.fit` and :code:`client." -"evaluate` functions during each round of federated learning." -msgstr "" -"서버는 매 라운드마다 새로운 설정 값을 클라이언트에 전달하기 위해 " -":code:`on_fit_config_fn`에 함수를 제공할 수 있습니다. 제공된 함수는 전략에 " -"의해 호출되며 클라이언트에 전송될 구성 키 값 쌍의 dictionary를 반환해야 " -"합니다. 연합 학습의 각 라운드 동안 임의의 구성 값 dictionary인 :code:`client." -"fit` 및 :code:`client.evaluate` 함수를 반환해야 합니다." +#: ../../source/how-to-implement-strategies.rst:311 +msgid "The :code:`evaluate` method" +msgstr ":code:`evaluate` 메서드" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:313 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive the " -"dictionary returned by the :code:`on_fit_config_fn` in its own :code:`client." -"fit()` function." +":code:`evaluate` is responsible for evaluating model parameters on the " +"server-side. Having :code:`evaluate` in addition to " +":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " +"to perform both servers-side and client-side (federated) evaluation." msgstr "" -":code:`on_fit_config_fn`은 서버에서 클라이언트로 임의의 구성 값을 전달하고, " -"예를 들어 학습 속도를 조정하기 위해 매 라운드마다 이 값을 잠재적으로 변경하" -"는 데 사용할 수 있습니다. 클라이언트는 자체 :code:`client.fit()` 함수에서 :" -"code:`on_fit_config_fn`이 반환한 dictionary를 받습니다." +":code:`evaluate`는 서버 측에서 모델 매개변수를 평가하는 역할을 담당합니다. " +"code:`configure_evaluate`/:code:`aggregate_evaluate`와 함께 " +":code:`evaluate`를 사용하면 서버 측과 클라이언트 측(federated) 평가를 모두 수행할 수 있는 전략을 사용할 수" +" 있습니다." 
-#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-implement-strategies.rst:323 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also :code:" -"`on_evaluate_config_fn` to customize the configuration sent to :code:`client." -"evaluate()`" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +":code:`evaluate` method might not complete successfully (e.g., it might " +"fail to load the server-side evaluation data)." msgstr "" -":code:`on_fit_config_fn`과 유사하게, :code:`client.evaluate()`로 전송되는 구" -"성을 사용자 지정하는 :code:`on_evaluate_config_fn`도 있습니다" +"반환 값은 전략에서 서버 측 평가를 구현할 필요가 없거나 사용자 정의 :code:`evaluate` 메서드가 성공적으로 완료되지 " +"않을 수 있기 때문에(예: 서버 측 평가 데이터를 로드하지 못할 수 있음) 다시 선택 사항으로 설정할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "서버 측 평가 구성" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" +msgstr "Flower 설치" -#: ../../source/how-to-use-strategies.rst:83 -msgid "" -"Server-side evaluation can be enabled by passing an evaluation function to :" -"code:`evaluate_fn`." -msgstr "" -"서버 측 평가는 :code:`evaluate_fn`에 평가 함수를 전달하여 활성화할 수 있습니" -"다." +#: ../../source/how-to-install-flower.rst:6 +msgid "Python version" +msgstr "Python 버전" -#: ../../source/how-to-use-strategies.rst:89 -msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides the " -"most flexibility. Read the `Implementing Strategies `_ guide to learn more." -msgstr "" -"완전한 사용자 지정 전략을 작성하는 것은 조금 더 복잡하지만 유연성이 가장 뛰어" -"납니다. 자세한 내용은 `Implementing Strategies `_ 가이드를 참조하세요." +#: ../../source/how-to-install-flower.rst:12 +msgid "Install stable release" +msgstr "안정적인 릴리즈 설치" -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "튜토리얼" +#: ../../source/how-to-install-flower.rst:15 +#: ../../source/how-to-upgrade-to-flower-next.rst:46 +msgid "Using pip" +msgstr "pip 사용" -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "빠른 시작 튜토리얼" +#: ../../source/how-to-install-flower.rst:17 +msgid "" +"Stable releases are available on `PyPI " +"`_::" +msgstr "안정적인 릴리즈는 `PyPI `_:: 에서 확인할 수 있습니다::" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "사용 방법 가이드" +#: ../../source/how-to-install-flower.rst:21 +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra::" +msgstr "가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr``을 ``simulation``extra와 함께 설치해야 합니다:" -#: ../../source/index.rst:99 -msgid "Legacy example guides" -msgstr "레거시 예제 가이드" +#: ../../source/how-to-install-flower.rst:27 +msgid "Using conda (or mamba)" +msgstr "conda(또는 mamba) 사용" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "설명" +#: ../../source/how-to-install-flower.rst:29 +msgid "Flower can also be installed from the ``conda-forge`` channel." +msgstr "Flower은 'conda-forge' 채널에서도 설치할 수 있습니다." 
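A sketch of the server-side evaluation function mentioned above; the hard-coded loss and accuracy are placeholders for a real centralized evaluation on held-out data.

.. code-block:: python

    import flwr as fl


    def evaluate_fn(server_round, parameters, config):
        # `parameters` arrives as a list of NumPy ndarrays; a real implementation
        # would load them into a model and score it on a centralized dataset
        loss, accuracy = 0.5, 0.9  # placeholder values
        return loss, {"accuracy": accuracy}


    strategy = fl.server.strategy.FedAvg(evaluate_fn=evaluate_fn)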
-#: None:-1 -msgid "API reference" -msgstr "API 참조" +#: ../../source/how-to-install-flower.rst:31 +msgid "" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following::" +msgstr "채널에 'conda-forge'를 추가하지 않은 경우 먼저 다음을 실행해야 합니다:" -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "참조 문서" +#: ../../source/how-to-install-flower.rst:36 +msgid "" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``::" +msgstr "conda-forge`` 채널이 활성화되면 ``flwr``을 ``conda``로 설치할 수 있습니다::" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" -msgstr "기여자 튜토리얼" +#: ../../source/how-to-install-flower.rst:40 +msgid "or with ``mamba``::" +msgstr "또는 ``mamba``::" -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" -msgstr "기여자 사용법 가이드" +#: ../../source/how-to-install-flower.rst:46 +msgid "Verify installation" +msgstr "설치 확인" -#: ../../source/index.rst:172 -msgid "Contributor explanations" -msgstr "기여자 설명" +#: ../../source/how-to-install-flower.rst:48 +#, fuzzy +msgid "" +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" +msgstr "" +"다음 명령을 사용하여 Flower가 성공적으로 설치되었는지 확인할 수 있습니다. 모든 것이 정상적으로 작동하면 명령줄에 " +"Flower의 버전이 출력됩니다:" -#: ../../source/index.rst:178 -msgid "Contributor references" -msgstr "기여자 참조" +#: ../../source/how-to-install-flower.rst:58 +msgid "Advanced installation options" +msgstr "고급 설치 옵션" -#: ../../source/index.rst:-1 -msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." -msgstr "연합 학습을 위한 Python 개발을 쉽게 할 수 있는 주요 Flower 프레임워크의 " -"설명서를 확인하세요." +#: ../../source/how-to-install-flower.rst:61 +msgid "Install via Docker" +msgstr "Docker를 통해 설치" -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" -msgstr "플라워 프레임워크 문서" +#: ../../source/how-to-install-flower.rst:63 +#, fuzzy +msgid ":doc:`Run Flower using Docker `" +msgstr ":doc:`Docker를 사용하여 Flower를 실행하는 방법 `" -#: ../../source/index.rst:7 +#: ../../source/how-to-install-flower.rst:66 +msgid "Install pre-release" +msgstr "사전 릴리즈 설치" + +#: ../../source/how-to-install-flower.rst:68 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens::" msgstr "" -"Flower 문서에 오신 것을 환영합니다. Flower `_는 편한 연합 " -"학습 프레임워크입니다." - -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "Flower 커뮤니티 가입하기" +"새(불안정할 수 있는) 버전의 Flower는 안정 버전이 출시되기 전에 사전 릴리즈 버전(알파, 베타, 릴리즈 후보)으로 제공되는 " +"경우가 있습니다:" -#: ../../source/index.rst:13 +#: ../../source/how-to-install-flower.rst:72 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra::" msgstr "" -"Flower 커뮤니티는 연구원, 엔지니어, 학생, 전문가, 학자 및 기타 애호가들로 구" -"성된 편한 그룹으로 빠르게 성장하고 있습니다." 
+"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr`` 사전 릴리즈를 ``simulation`` extra와 함께 " +"설치해야 합니다:" -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "Slack에 가입하세요" +#: ../../source/how-to-install-flower.rst:77 +msgid "Install nightly release" +msgstr "야간 릴리즈 설치" -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower 프레임워크" +#: ../../source/how-to-install-flower.rst:79 +msgid "" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases::" +msgstr "Flower의 최신 (불안정할 수 있는) 변경 사항은 다음과 같이 야간 릴리즈로 제공됩니다:" -#: ../../source/index.rst:25 +#: ../../source/how-to-install-flower.rst:83 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to " -"learn more." +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra::" msgstr "" -"이 사용자 가이드는 Flower를 사용해 기존 머신 러닝 워크로드를 연합된 환경으로 " -"가져오고자 하는 연구자와 개발자를 대상으로 합니다. Flower의 설계 목표 중 하나" -"는 이를 간단하게 만드는 것이었습니다. 자세히 알아보려면 계속 읽어보세요." +"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우, ``flwr-nightly``를 ``simulation`` extr와 함께 " +"설치해야 합니다::" -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "튜토리얼" +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" +msgstr "모니터 시뮬레이션" -#: ../../source/index.rst:32 +#: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"A learning-oriented series of federated learning tutorials, the best place " -"to start." -msgstr "학습 중심의 연합 학습 튜토리얼 시리즈로, 시작하기에 가장 좋은 곳입니다." - -#: ../../source/index.rst:61 -msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:" -"`TensorFlow ` | :doc:`🤗 Transformers " -"` | :doc:`JAX ` | :" -"doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:" -"`XGBoost ` | :doc:`Android ` | :doc:`iOS `" -msgstr "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:" -"`TensorFlow ` | :doc:`🤗 Transformers " -"` | :doc:`JAX ` | :" -"doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:" -"`XGBoost ` | :doc:`Android ` | :doc:`iOS `" - -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" -msgstr "파이토치용 동영상 튜토리얼도 만들었습니다:" - -#: ../../source/index.rst:68 -msgid "And TensorFlow:" -msgstr "그리고 TensorFlow도:" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." +msgstr "" +"Flower를 사용하면 시뮬레이션을 실행하는 동안 시스템 리소스를 모니터링할 수 있습니다. 또한 Flower 시뮬레이션 엔진은 " +"강력하며 클라이언트별 리소스 할당 방법을 결정하고 총 사용량을 제한할 수 있습니다. 리소스 소비에 대한 인사이트를 통해 더 현명한 " +"결정을 내리고 실행 시간을 단축할 수 있습니다." -#: ../../source/index.rst:76 +#: ../../source/how-to-monitor-simulation.rst:6 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a specific " -"goal." +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" -"문제 중심의 방법 가이드는 특정 목표를 달성하는 방법을 단계별로 보여줍니다." +"구체적인 지침은 macOS를 사용 중이고 'Homebrew `_ 패키지 관리자가 설치되어 있다고 " +"가정합니다." 
-#: ../../source/index.rst:110 +#: ../../source/how-to-monitor-simulation.rst:10 +msgid "Downloads" +msgstr "다운로드" + +#: ../../source/how-to-monitor-simulation.rst:16 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" -"이해 중심의 개념 가이드에서는 Flower와 협업 AI의 주요 주제와 기본 아이디어를 " -"설명하고 토론합니다." +"`Prometheus `_는 데이터 수집에 사용되며, `Grafana " +"`_는 수집된 데이터를 시각화할 수 있게 해줍니다. 이 두 도구는 모두 Flower가 " +"내부적으로 사용하는 `Ray `_와 잘 통합되어 있습니다." -#: ../../source/index.rst:120 -msgid "References" -msgstr "참조" +#: ../../source/how-to-monitor-simulation.rst:18 +msgid "" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." +msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "정보 지향 API 참조 및 기타 참고 자료." +#: ../../source/how-to-monitor-simulation.rst:20 +msgid "If you are on an M1 Mac, it should be:" +msgstr "M1 Mac을 사용 중이라면:" -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" -msgstr ":py:obj:`flwr `\\" +#: ../../source/how-to-monitor-simulation.rst:27 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "이전 세대 Intel Mac 장치에서는:" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." -msgstr "Flower 메인 패키지." +#: ../../source/how-to-monitor-simulation.rst:34 +msgid "" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" +msgstr "각 구성 파일을 열고 변경합니다. 장치에 따라 다음 두 명령 중 하나를 사용합니다:" -#: ../../source/index.rst:148 -msgid "Contributor docs" -msgstr "기여자 문서" +#: ../../source/how-to-monitor-simulation.rst:44 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" +msgstr "" +"를 입력한 다음 파일의 모든 텍스트를 삭제하고 아래에 표시된 새 Prometheus 설정을 붙여넣습니다. 요구 사항에 따라 시간 " +"간격을 조정할 수 있습니다:" -#: ../../source/index.rst:150 +#: ../../source/how-to-monitor-simulation.rst:59 msgid "" -"The Flower community welcomes contributions. The following docs are intended " -"to help along the way." +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" msgstr "" -"Flower 커뮤니티는 여러분의 기여를 환영합니다. 다음 문서는 그 과정에서 도움을 " -"드리기 위한 문서입니다." +"이제 Prometheus 구성을 편집한 후 Grafana 구성 파일에 대해서도 동일한 작업을 수행합니다. 이전과 마찬가지로 다음 " +"명령 중 하나를 사용하여 파일을 엽니다:" -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" -msgstr "Flower CLI 참조" +#: ../../source/how-to-monitor-simulation.rst:69 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." -#: ../../source/ref-api-cli.rst:7 -msgid "flower-simulation" -msgstr "flower 시뮬레이션" +#: ../../source/how-to-monitor-simulation.rst:84 +msgid "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." +msgstr "축하합니다. 매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이제 시작해 보겠습니다." 
-#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower 초연결" +#: ../../source/how-to-monitor-simulation.rst:88 +msgid "Tracking metrics" +msgstr "매트릭 트래킹" -#: ../../source/ref-api-cli.rst:27 -msgid "flower-client-app" -msgstr "flower 클라이언트 앱" +#: ../../source/how-to-monitor-simulation.rst:90 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해야 합니다." -#: ../../source/ref-api-cli.rst:37 -msgid "flower-server-app" -msgstr "flower 서버 프로그램" +#: ../../source/how-to-monitor-simulation.rst:97 +msgid "" +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." -#: ../../source/ref-api/flwr.rst:2 -msgid "flwr" -msgstr "flwr" +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "Now, you are ready to start your workload." +msgstr "이제 워크로드를 시작할 준비가 되었습니다." -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 -msgid "Modules" -msgstr "Modules" +#: ../../source/how-to-monitor-simulation.rst:110 +msgid "" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" +msgstr "시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" -msgstr ":py:obj:`flwr.client `\\" +#: ../../source/how-to-monitor-simulation.rst:117 +msgid "You can look at everything at ``_ ." +msgstr "``_ 에서 모든 것을 볼 수 있습니다." -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." -msgstr "Flower 클라이언트." +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." +msgstr "Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니다." -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" -msgstr ":py:obj:`flwr.common `\\" +#: ../../source/how-to-monitor-simulation.rst:121 +msgid "" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." +msgstr "" +"또는 오른쪽 위 모서리인 \"Grafana에서 보기\"를 클릭하여 Grafana에서 바로 확인할 수도 있습니다. Ray 대시보드는 " +"시뮬레이션 중에만 액세스할 수 있다는 점에 유의하세요. 시뮬레이션이 종료된 후에는 Grafana를 사용하여 메트릭을 탐색할 수만 " +"있습니다. ``http://localhost:3000/``로 이동하여 Grafana를 시작할 수 있습니다." -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." -msgstr "서버와 클라이언트 간에 공유되는 공통 구성 요소입니다." +#: ../../source/how-to-monitor-simulation.rst:123 +msgid "" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port :code:`3000` on " +"your machine as long as they are running." +msgstr "" +"시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다. 그렇지 않으면 실행 중인 동안 컴퓨터에서 포트 " +":code:`3000` 등을 차단하므로 이 작업이 중요합니다." 
-#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" -msgstr ":py:obj:`flwr.server `\\" +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "Resource allocation" +msgstr "리소스 할당" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." -msgstr "Flower 서버." +#: ../../source/how-to-monitor-simulation.rst:134 +msgid "" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." +msgstr "Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리소스를 효율적으로 할당할 수 있습니다." -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" -msgstr ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-monitor-simulation.rst:136 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" +msgstr "" +"처음에 시뮬레이션(Ray가 내부에서 처리하는)은 기본적으로 시스템에서 사용 가능한 모든 리소스를 사용하여 시작되며, 이 리소스는 " +"클라이언트 간에 공유됩니다. 그렇다고 해서 모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 동시에 모델 학습이 이루어지는" +" 것은 아닙니다. 이에 대한 자세한 내용은 이 블로그의 뒷부분에서 설명합니다. 다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -msgid "Flower simulation." -msgstr "Flower 시뮬레이션." +#: ../../source/how-to-monitor-simulation.rst:143 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "클라이언트" +#: ../../source/how-to-monitor-simulation.rst:155 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" +msgstr "그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다(모두 덮어쓸 필요는 없음):" -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -msgid "Functions" -msgstr "함수" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "Let’s also specify the resource for a single client." +msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" -msgstr ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:205 +msgid "" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." +msgstr "" +"이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는 경우에만 필요한 모든 리소스가 있을 때(병렬로 실행되는 등) 새 " +"클라이언트를 시작합니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -msgid "Run Flower client app." -msgstr "Flower 클라이언트 앱을 실행합니다." +#: ../../source/how-to-monitor-simulation.rst:207 +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " +"running two clients and therefore enable them to run concurrently. Be " +"careful not to require more resources than available. 
If you specified " +":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " +"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +msgstr "" +"위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 동시에 실행되지 않습니다. :code:`client_num_gpus = " +"0.5` 를 설정하면 두 개의 클라이언트를 실행할 수 있으므로 동시에 실행할 수 있습니다. 사용 가능한 리소스보다 더 많은 리소스를" +" 요구하지 않도록 주의하세요. :code:`client_num_gpus = 2`를 지정하면 시뮬레이션이 시작되지 않습니다(GPU가 " +"2개이지만 :code:`ray_init_args`에서 1개를 설정한 경우에도 마찬가지입니다)." -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr ":py:obj:`run_supernode `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "자주 묻는 질문" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -msgid "Run Flower SuperNode." -msgstr "Flower SuperNode를 실행합니다." +#: ../../source/how-to-monitor-simulation.rst:214 +msgid "Q: I don't see any metrics logged." +msgstr "질문: 기록된 메트릭이 보이지 않습니다." -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-monitor-simulation.rst:216 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." msgstr "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"A: 기간이 제대로 설정되지 않았을 수 있습니다. 설정은 오른쪽 상단에 있습니다(기본값은 '지난 30분'). 시뮬레이션이 실행된 " +"기간을 반영하도록 기간을 변경해 주세요." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." -msgstr "Flower 서버에 연결되는 Flower 클라이언트 노드를 시작합니다." - -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-monitor-simulation.rst:218 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\, " -"server\\_address\\, client\\)" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" -":py:obj:`start_numpy_client `\\ \\(\\*\\, " -"server\\_address\\, client\\)" +"질문: \"Grafana 서버가 감지되지 않았습니다. Ray 대시보드의 메트릭 탭으로 이동한 후 Grafana 서버가 실행 중인지 " +"확인하고 이 페이지를 새로고침하세요.\"라는 메시지가 표시됩니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." -msgstr "gRPC 서버에 연결되는 Flower NumPyClient를 시작합니다." +#: ../../source/how-to-monitor-simulation.rst:220 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" +msgstr "A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" -msgstr "클래스" +#: ../../source/how-to-monitor-simulation.rst:226 +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"``_." +msgstr "Q: ``_로 이동할 때 \"이 사이트에 연결할 수 없습니다.\"라는 메시지가 표시됩니다." 
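A sketch of the resource settings discussed above, reusing a ``client_fn`` like the one in the earlier simulation sketch; the concrete CPU/GPU numbers are arbitrary.

.. code-block:: python

    import flwr as fl

    # Assumes `client_fn` is defined as in the earlier simulation sketch
    ray_init_args = {"num_cpus": 8, "num_gpus": 1}  # cap total resources Ray may use
    client_resources = {"num_cpus": 2, "num_gpus": 0.5}  # slice reserved per virtual client

    # With these numbers at most two clients can hold the single GPU at once;
    # asking for e.g. num_gpus=2 per client could never be satisfied here.
    fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=10,
        config=fl.server.ServerConfig(num_rounds=3),
        ray_init_args=ray_init_args,
        client_resources=client_resources,
    )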
-#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" -msgstr ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:228 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." +msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." -msgstr "Flower 클라이언트를 위한 추상 베이스 클래스입니다." +#: ../../source/how-to-monitor-simulation.rst:232 +msgid "Resources" +msgstr "리소스" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-monitor-simulation.rst:234 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, mods\\]\\)" +"Ray Dashboard: ``_" msgstr "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, mods\\]\\)" - -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -msgid "Flower ClientApp." -msgstr "Flower ClientApp." +"Ray 대시보드: ``_" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" -msgstr ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:236 +msgid "Ray Metrics: ``_" +msgstr "Ray 메트릭: ``_" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." -msgstr "NumPy를 사용하는 Flower 클라이언트를 위한 추상 베이스 클래스입니다." +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "시뮬레이션 실행" -#: ../../source/ref-api/flwr.client.rst:52::1 -msgid ":py:obj:`flwr.client.mod `\\" -msgstr ":py:obj:`flwr.client.mod `\\" +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." +msgstr "" +"Federated 학습 워크로드 시뮬레이션은 다양한 사용 사례에 유용합니다. 대규모 클라이언트 집단에서 워크로드를 실행하되 많은 " +"수의 물리적 장치를 소싱, 구성 및 관리할 필요가 없는 경우, 복잡한 설정 과정을 거치지 않고도 액세스 가능한 컴퓨팅 시스템에서 " +"최대한 빠르게 FL 워크로드를 실행하려는 경우, 다양한 수준의 데이터 및 시스템 이질성, 클라이언트 가용성, 개인정보 예산 등의 " +"다양한 시나리오에서 알고리즘을 검증하려는 경우 등 여러 가지 사용 사례에 유용합니다. 이러한 사례는 FL 워크로드 시뮬레이션이 " +"적합한 사용 사례 중 일부입니다. Flower는 `VirtualClientEngine `_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 " +"있습니다." -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of -msgid "Flower Built-in Mods." -msgstr "Flower 내장 모드." +#: ../../source/how-to-run-simulations.rst:10 +msgid "" +"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" +" clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. 
In addition to that, clients managed by the " +":code:`VirtualClientEngine` are:" +msgstr "" +":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약, 실행 및 관리합니다. 이러한 클라이언트는 " +"`non-virtual` 클라이언트(예: `flwr.client.start_client `_ 명령을 통해 실행하는 클라이언트)와 동일하며, `flwr.client.NumPyClient `_에서 상속하는 클래스 생성으로 구성될 수 있으므로 동일한 " +"방식으로 동작합니다. 그 외에도 :code:`VirtualClientEngine`에 의해 관리되는 클라이언트는 다음과 같습니다:" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" -msgstr "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-run-simulations.rst:12 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." +msgstr "" +"resource-aware: 이는 각 클라이언트가 시스템에서 컴퓨팅 및 메모리의 일부를 할당받는다는 것을 의미합니다. 사용자는 " +"시뮬레이션을 시작할 때 이를 제어할 수 있으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " +"클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" -msgstr "메소드" +#: ../../source/how-to-run-simulations.rst:13 +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +"internals." +msgstr "" +"self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대신 " +":code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" -msgstr ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:14 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." +msgstr "" +"ephemeral: 이는 클라이언트가 FL 프로세스에서 필요할 때만 구체화됨을 의미합니다(예: `fit() `_을 수행하기 위해). 객체는 나중에 소멸되어 할당된 리소스를 해제하고" +" 다른 클라이언트가 참여할 수 있도록 허용합니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 평가합니다." +#: ../../source/how-to-run-simulations.rst:16 +msgid "" +"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " +"of `Actors `_ to " +"spawn `virtual` clients and run their workload." 
+msgstr "" +":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프레임워크인 `Ray " +"`_를 사용하여 `virtual` 클라이언트를 구현합니다. 특히 Flower의 " +":code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생성하고 해당 워크로드를 실행합니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" -msgstr ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:20 +msgid "Launch your Flower simulation" +msgstr "Flower 시뮬레이션 시작" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." -msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 구체화합니다." +#: ../../source/how-to-run-simulations.rst:22 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" +msgstr "" +"Flower 시뮬레이션을 실행하려면 여전히 클라이언트 클래스, 전략 및 유틸리티 함수를 정의하여 데이터 세트를 다운로드하고 로드(및" +" 파티션)해야 합니다. 이 작업을 마친 후 시뮬레이션을 시작하려면 `start_simulation `_을 사용하면 되며, 최소한의 예시는 다음과 " +"같습니다:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:44 +msgid "VirtualClientEngine resources" +msgstr "VirtualClientEngine 리소스" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -msgid "Get the run context from this client." -msgstr "이 클라이언트에서 실행 컨텍스트를 가져옵니다." +#: ../../source/how-to-run-simulations.rst:45 +msgid "" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +":code:`ray_init_args` input argument to :code:`start_simulation` which " +"the VCE internally passes to Ray's :code:`ray.init` command. For a " +"complete list of settings you can configure check the `ray.init " +"`_" +" documentation. Do not set :code:`ray_init_args` if you want the VCE to " +"use all your system's CPUs and GPUs." +msgstr "" +"기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU, 모든 GPU 등)에 액세스할 수 있으며, 이는 Ray를 시작할 때의 기본" +" 동작이기도 합니다. 그러나 일부 설정에서는 시뮬레이션에 사용되는 시스템 리소스의 수를 제한하고 싶을 수 있습니다. 이 설정은 " +"VCE가 내부적으로 Ray의 :code:`ray.init` 명령에 전달하는 :code:`start_simulation`에 대한 " +":code:`ray_init_args` 입력 인수를 통해 수행할 수 있습니다. 구성할 수 있는 전체 설정 목록은 `ray.init " +"`_" +" 설명서를 확인하세요. VCE가 시스템의 모든 CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를 설정하지" +" 마세요." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-run-simulations.rst:62 +msgid "Assigning client resources" +msgstr "클라이언트 리소스 할당" + +#: ../../source/how-to-run-simulations.rst:63 msgid "" -":py:obj:`get_parameters `\\ \\(ins\\)" +"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" +" nothing else) to each virtual client. This means that if your system has" +" 10 cores, that many virtual clients can be concurrently running." msgstr "" -":py:obj:`get_parameters `\\ \\(ins\\)" +"기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에 단일 CPU 코어를 할당합니다(그 외에는 " +"아무것도 할당하지 않음). 즉, 시스템에 코어가 10개인 경우 그만큼 많은 가상 클라이언트를 동시에 실행할 수 있습니다." 
-#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." -msgstr "현재 로컬 모델 파라미터를 반환합니다." +#: ../../source/how-to-run-simulations.rst:65 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" +msgstr "" +"대부분의 경우 FL 워크로드의 복잡성(즉, 컴퓨팅 및 메모리 사용량)에 따라 클라이언트에 할당되는 리소스를 조정하고 싶을 것입니다." +" 시뮬레이션을 시작할 때 `client_resources` argument를 `start_simulation `_로 설정하여 이를 수행할 수 있습니다. Ray는 " +"내부적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하고 스폰합니다:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-run-simulations.rst:67 +msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +msgstr ":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." + +#: ../../source/how-to-run-simulations.rst:68 msgid "" -":py:obj:`get_properties `\\ \\(ins\\)" +":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " +"assigned." +msgstr ":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." + +#: ../../source/how-to-run-simulations.rst:70 +msgid "Let's see a few examples:" +msgstr "몇 가지 예를 살펴보겠습니다:" + +#: ../../source/how-to-run-simulations.rst:89 +msgid "" +"While the :code:`client_resources` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +":code:`VirtualClientEngine` will schedule 100 jobs to run (each " +"simulating a client sampled by the strategy) and then will execute them " +"in a resource-aware manner in batches of 8." +msgstr "" +"code:`client_resources`를 사용하여 FL 시뮬레이션의 동시성 정도를 제어할 수 있지만, 동일한 라운드에서 수십, " +"수백 또는 수천 개의 클라이언트를 실행하고 훨씬 더 많은 '휴면'(즉, 라운드에 참여하지 않는) 클라이언트를 보유하는 것을 막을 " +"수는 없습니다. 라운드당 100명의 클라이언트를 받고 싶지만 시스템이 동시에 8명의 클라이언트만 수용할 수 있다고 가정해 봅시다. " +"code:`VirtualClientEngine`은 실행할 100개의 작업(각각 전략에서 샘플링한 클라이언트를 시뮬레이션)을 예약한 " +"다음 리소스 인식 방식으로 8개씩 일괄적으로 실행합니다." + +#: ../../source/how-to-run-simulations.rst:91 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." msgstr "" -":py:obj:`get_properties `\\ \\(ins\\)" +"리소스가 FL 클라이언트를 예약하는 데 사용되는 방법과 사용자 지정 리소스를 정의하는 방법에 대한 모든 복잡한 세부 사항을 " +"이해하려면 'Ray 문서 '를 참조하세요." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." -msgstr "클라이언트의 속성 집합을 반환합니다." 
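To make the ``num_cpus``/``num_gpus`` combinations above concrete, a few hedged examples; the numbers are illustrative, and ``client_fn`` is assumed to be defined as in the earlier sketch::

    import flwr as fl

    # 2 CPU cores and no GPU share per client: on a 16-core machine at most
    # eight clients run at the same time.
    two_cpu_clients = {"num_cpus": 2, "num_gpus": 0.0}

    # 1 CPU core and a quarter of a GPU per client: up to four clients can
    # share a single GPU concurrently (provided the VRAM suffices).
    quarter_gpu_clients = {"num_cpus": 1, "num_gpus": 0.25}

    fl.simulation.start_simulation(
        client_fn=client_fn,  # assumed to exist, see the earlier sketch
        num_clients=100,      # the strategy may still sample 100 clients per round
        client_resources=quarter_gpu_clients,  # ...but only about 4 run in parallel here
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=fl.server.strategy.FedAvg(),
    )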
+#: ../../source/how-to-run-simulations.rst:94 +msgid "Simulation examples" +msgstr "시뮬레이션 예제" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" -msgstr ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-run-simulations.rst:96 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" +msgstr "" +"Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 몇 가지 Flower 시뮬레이션 예제는 `Flower 레포지토리 " +"`_에서 제공됩니다. Google Colab에서도 실행할 수 있습니다:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." -msgstr "이 클라이언트에 실행 컨텍스트를 적용합니다." +#: ../../source/how-to-run-simulations.rst:98 +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." +msgstr "" +"`Tensorflow/Keras 시뮬레이션 " +"`_: 100개의 클라이언트가 공동으로 MNIST에서 MLP 모델을 훈련합니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:99 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." +msgstr "" +"파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈련합니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." -msgstr "클라이언트(자체)를 반환합니다." +#: ../../source/how-to-run-simulations.rst:104 +msgid "Multi-node Flower simulations" +msgstr "멀티 노드 Flower 시뮬레이션" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" -msgstr "속성" +#: ../../source/how-to-run-simulations.rst:106 +msgid "" +"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " +"across multiple compute nodes. 
Before starting your multi-node simulation" +" ensure that you:" +msgstr "" +"Flower의 :code:`VirtualClientEngine`을 사용하면 여러 컴퓨팅 노드에서 FL 시뮬레이션을 실행할 수 " +"있습니다. 멀티 노드 시뮬레이션을 시작하기 전에 다음 사항을 확인하세요:" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +#: ../../source/how-to-run-simulations.rst:108 +msgid "Have the same Python environment in all nodes." +msgstr "모든 노드에서 동일한 Python 환경을 유지합니다." -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" -msgstr "파라미터" +#: ../../source/how-to-run-simulations.rst:109 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +msgstr "모든 노드에 코드 사본(예: 전체 레포지토리)을 보관하세요." 
-#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:110 msgid "" -"The evaluation instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to customize " -"the local evaluation process." +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -"서버에서 받은 (전역) 모델 파라미터와 로컬 평가 프로세스를 사용자 지정하는 데 " -"사용되는 구성 값 사전이 포함된 평가 지침입니다." +"모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은 :ref:`simulation considerations " +"`에서 확인하세요)" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" -msgstr "반환" +#: ../../source/how-to-run-simulations.rst:111 +msgid "" +"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " +"`_ so the " +":code:`VirtualClientEngine` attaches to a running Ray instance." +msgstr "" +":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation `_에 전달하여 " +":code:`VirtualClientEngine`이 실행 중인 Ray 인스턴스에 연결되도록 합니다." + +#: ../../source/how-to-run-simulations.rst:112 +msgid "" +"Start Ray on you head node: on the terminal type :code:`ray start " +"--head`. This command will print a few lines, one of which indicates how " +"to attach other nodes to the head node." +msgstr "" +"헤드 노드에서 Ray 시작: 터미널에서 :code:`ray start --head`를 입력합니다. 이 명령은 몇 줄을 출력하며, 그" +" 중 하나는 다른 노드를 헤드 노드에 연결하는 방법을 나타냅니다." + +#: ../../source/how-to-run-simulations.rst:113 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +":code:`ray start --address='192.168.1.132:6379'`" +msgstr "" +"헤드 노드에 다른 노드 연결: 헤드를 시작한 후 표시된 명령어을 복사하여 새 노드의 터미널에서 실행합니다: 예: :code:`ray" +" start --address='192.168.1.132:6379'`" + +#: ../../source/how-to-run-simulations.rst:115 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." +msgstr "위의 모든 작업이 완료되면 단일 노드에서 시뮬레이션을 실행할 때와 마찬가지로 헤드 노드에서 코드를 실행할 수 있습니다." 
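On the Flower side, attaching to the Ray cluster prepared in the steps above only requires the ``address`` entry in ``ray_init_args``. A hedged sketch (the addresses and the surrounding call are illustrative)::

    import flwr as fl

    # Shell side (run before this script):
    #   head node:    ray start --head
    #   worker nodes: ray start --address='192.168.1.132:6379'
    #
    # Python side, executed on the head node: attach to the running Ray
    # instance instead of starting a new one.
    fl.simulation.start_simulation(
        client_fn=client_fn,  # assumed to exist, see the earlier sketch
        num_clients=100,
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=fl.server.strategy.FedAvg(),
        ray_init_args={"address": "auto"},
    )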
+ +#: ../../source/how-to-run-simulations.rst:117 +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command :code:`ray stop` in each node's " +"terminal (including the head node)." +msgstr "" +"시뮬레이션이 완료되면 클러스터를 해체하려면 각 노드(헤드 노드 포함)의 터미널에서 :code:`ray stop` 명령을 실행하기만 " +"하면 됩니다." + +#: ../../source/how-to-run-simulations.rst:120 +msgid "Multi-node simulation good-to-know" +msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" + +#: ../../source/how-to-run-simulations.rst:122 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" +msgstr "여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합니다:" + +#: ../../source/how-to-run-simulations.rst:124 +msgid "" +"User :code:`ray status` to check all nodes connected to your head node as" +" well as the total resources available to the " +":code:`VirtualClientEngine`." +msgstr "" +"사용자는 :code:`ray status`를 통해 헤드 노드에 연결된 모든 노드와 " +":code:`VirtualClientEngine`에 사용 가능한 총 리소스를 확인할 수 있습니다." + +#: ../../source/how-to-run-simulations.rst:126 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +":code:`VirtualClientEngine` can schedule as many `virtual` clients as " +"that node can possible run. In some settings you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"`--num-cpus=` and/or `--num-" +"gpus=` in any :code:`ray start` command (including " +"when starting the head)" +msgstr "" +"새 노드를 헤드에 연결하면 해당 노드의 모든 리소스(즉, 모든 CPU, 모든 GPU)가 헤드 노드에 표시됩니다. 즉, " +":code:`VirtualClientEngine`은 해당 노드가 실행할 수 있는 만큼의 `가상` 클라이언트를 예약할 수 있습니다. " +"일부 설정에서는 시뮬레이션에서 특정 리소스를 제외하고 싶을 수 있습니다. 모든 :code:`ray start` 명령(헤드 시작 시 " +"포함)에 `--num-cpus=` 및/또는 `--num-" +"gpus=`를 추가하여 이 작업을 수행하면 됩니다" + +#: ../../source/how-to-run-simulations.rst:132 +msgid "Considerations for simulations" +msgstr "시뮬레이션 시 고려 사항" + +#: ../../source/how-to-run-simulations.rst:135 +msgid "" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." +msgstr "Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측면에서 적극적으로 노력하고 있습니다." + +#: ../../source/how-to-run-simulations.rst:138 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." +msgstr "" +"현재 VCE를 사용하면 개인 노트북에서 간단한 시나리오를 프로토타이핑하든, 여러 고성능 GPU 노드에서 복잡한 FL 파이프라인을 " +"훈련하든 상관없이 시뮬레이션 모드에서 Federated 학습 워크로드를 실행할 수 있습니다. VCE에 더 많은 기능을 추가하는 " +"동안, 아래에서는 Flower로 FL 파이프라인을 설계할 때 염두에 두어야 할 몇 가지 사항을 강조합니다. 또한 현재 구현에서 몇 " +"가지 제한 사항을 강조합니다." + +#: ../../source/how-to-run-simulations.rst:141 +msgid "GPU resources" +msgstr "GPU 리소스" + +#: ../../source/how-to-run-simulations.rst:143 +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"internally by the VCE) is by default:" +msgstr "" +"VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를 지정하는 클라이언트에 GPU 메모리 " +"공유를 할당합니다. 
즉, (VCE에서 내부적으로 사용하는) Ray가 기본적으로 사용됩니다:" + +#: ../../source/how-to-run-simulations.rst:146 +msgid "" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set :code:`num_gpus=0.5` and you have two GPUs in your system with " +"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" +" concurrently." +msgstr "" +"GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다. 즉, 시스템에 서로 다른(예: 32GB와 8GB) VRAM 용량을 가진 두" +" 개의 GPU가 있고 :code:`num_gpus=0.5`를 설정하면 둘 다 동시에 2개의 클라이언트를 실행하게 됩니다." + +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" +msgstr "" +"관련 없는(즉, VCE에 의해 생성되지 않은) 다른 워크로드가 GPU에서 실행되고 있는지 알지 못합니다. 여기서 두 가지 시사점을 " +"얻을 수 있습니다:" + +#: ../../source/how-to-run-simulations.rst:149 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" +msgstr "" +"집계 후 '글로벌 모델'을 평가하려면 Flower 서버에 GPU가 필요할 수 있습니다(예: `evaluate method `_를 사용할 때)" + +#: ../../source/how-to-run-simulations.rst:150 +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " +"experiment." +msgstr "" +"동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을 실행하려면, 실험을 시작할 때 " +":code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 합니다." + +#: ../../source/how-to-run-simulations.rst:153 +msgid "" +"In addition, the GPU resource limits passed to :code:`client_resources` " +"are not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." +msgstr "" +"또한 :code:`client_resources`에 전달된 GPU 리소스 제한이 '강제'되지 않아(즉, 초과할 수 있음) " +"클라이언트가 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 VRAM을 사용하는 상황이 발생할 수 있습니다." + +#: ../../source/how-to-run-simulations.rst:156 +msgid "TensorFlow with GPUs" +msgstr "GPU를 사용한 TensorFlow" + +#: ../../source/how-to-run-simulations.rst:158 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." +msgstr "" +"`TensorFlow와 함께 GPU를 사용 `_하면 프로세스에 " +"보이는 모든 GPU의 거의 전체 GPU 메모리가 매핑됩니다. 이는 최적화 목적으로 TensorFlow에서 수행됩니다. 그러나 " +"GPU를 여러 개의 '가상' 클라이언트로 분할하려는 FL 시뮬레이션과 같은 설정에서는 이는 바람직한 메커니즘이 아닙니다. 다행히도 " +"'메모리 증가 활성화 " +"`_'를 통해 " +"이 기본 동작을 비활성화할 수 있습니다." + +#: ../../source/how-to-run-simulations.rst:160 +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " +"in order to specify a function to be executed upon actor initialization. " +"In this case, to enable GPU growth for TF workloads. It would look as " +"follows:" +msgstr "" +"이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서 생성한 각 액터에서 수행해야 합니다. " +":code:`actor_kwargs`를 통해 예약 키 `\"on_actor_init_fn\"`을 전달하여 액터 초기화 시 실행할 " +"함수를 지정할 수 있습니다. 이 경우 TF 워크로드에 대한 GPU 증가를 활성화합니다. 
다음과 같이 보입니다:" + +#: ../../source/how-to-run-simulations.rst:179 +msgid "" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." +msgstr "" +"이것이 바로`Tensorflow/Keras Simulation " +"`_ 예제에서 사용된 메커니즘입니다." + +#: ../../source/how-to-run-simulations.rst:183 +msgid "Multi-node setups" +msgstr "멀티 노드 설정" + +#: ../../source/how-to-run-simulations.rst:185 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." +msgstr "" +"VCE는 현재 특정 '가상' 클라이언트를 어느 노드에서 실행할지 제어하는 방법을 제공하지 않습니다. 즉, 클라이언트가 실행하는 데 " +"필요한 리소스가 하나 이상의 노드에 있는 경우 해당 노드 중 어느 노드에나 클라이언트 워크로드가 예약될 수 있습니다. FL 프로세스" +" 후반부(즉, 다른 라운드에서)에는 동일한 클라이언트가 다른 노드에서 실행될 수 있습니다. 클라이언트가 데이터 세트에 액세스하는 " +"방식에 따라 모든 노드에 모든 데이터 세트 파티션의 복사본을 보유하거나 데이터 중복을 피하기 위해 데이터 세트 제공 메커니즘(예: " +"nfs, 데이터베이스 사용)을 사용해야 할 수 있습니다." + +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." +msgstr "" +"정의상 가상 클라이언트는 임시적 특성으로 인해 '상태 없음'입니다. 클라이언트 상태는 Flower 클라이언트 클래스의 일부로 구현할" +" 수 있지만, 사용자는 이를 영구 저장소(예: 데이터베이스, 디스크)에 저장하여 나중에 실행 중인 노드와 관계없이 동일한 " +"클라이언트가 검색할 수 있도록 해야 합니다. 이는 어떤 식으로든 클라이언트의 데이터 세트가 일종의 '상태'로 볼 수 있기 때문에 " +"위의 요점과도 관련이 있습니다." + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" +msgstr "모델 체크포인트 저장 및 로드" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." +msgstr "" +"Flower는 서버 측에서 모델 업데이트를 자동으로 저장하지 않습니다. 이 사용법 가이드에서는 Flower에서 모델 체크포인트를 " +"저장(및 로드)하는 단계에 대해 설명합니다." + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" +msgstr "모델 체크포인트" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +msgid "" +"Model updates can be persisted on the server-side by customizing " +":code:`Strategy` methods. Implementing custom strategies is always an " +"option, but for many cases it may be more convenient to simply customize " +"an existing strategy. The following code example defines a new " +":code:`SaveModelStrategy` which customized the existing built-in " +":code:`FedAvg` strategy. In particular, it customizes " +":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " +"(:code:`FedAvg`). 
It then continues to save returned (aggregated) weights" +" before it returns those aggregated weights to the caller (i.e., the " +"server):" +msgstr "" +":code:`Strategy` 메소드를 사용자 지정하여 서버 측에서 모델 업데이트를 지속할 수 있습니다. 사용자 지정 전략을 " +"구현하는 것은 항상 옵션이지만 대부분의 경우 기존 전략을 간단히 사용자 지정하는 것이 더 편리할 수 있습니다. 다음 코드 예시는 " +"기존의 기본 제공 :code:`FedAvg` 전략을 사용자 지정한 새로운 :code:`SaveModelStrategy`를 " +"정의합니다. 특히, 기본 클래스(:code:`FedAvg`)에서 :code:`aggregate_fit`을 호출하여 " +":code:`aggregate_fit`을 사용자 지정합니다. 그런 다음 호출자(즉, 서버)에게 집계된 가중치를 반환하기 전에 " +"반환된(집계된) 가중치를 계속 저장합니다:" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +msgid "Save and load PyTorch checkpoints" +msgstr "파이토치 체크포인트 저장 및 로드" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." +msgstr "" +"이전 예제와 비슷하지만 몇 가지 단계가 추가되어 ``torch.save`` 함수를 사용하여 파이토치 체크포인트를 저장하는 방법을 " +"보여드리겠습니다. 먼저, ``aggregate_fit``은 ``Parameters`` 객체를 반환하는데, 이 객체는 NumPy " +"``ndarray``의 목록으로 변환되어야 하며, ``OrderedDict`` 클래스 구조에 따라 파이토치 " +"``state_dict``로 변환됩니다." + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" +msgstr "" +"진행 상황을 로드하려면 코드에 다음 줄을 추가하기만 하면 됩니다. 이렇게 하면 저장된 모든 체크포인트를 반복하고 최신 체크포인트를 " +"로드합니다:" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." +msgstr "``전략``을 정의할 때 ``초기_파라미터``와 같이 필요한 경우 ``파라미터`` 유형의 이 객체를 반환/사용합니다." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "Flower 1.0으로 업그레이드" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +msgid "" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." +msgstr "" +"Flower 1.0이 출시되었습니다. 새로운 기능과 함께 Flower 1.0은 향후 성장을 위한 안정적인 기반을 제공합니다. " +"Flower 0.19(및 다른 0.x 시리즈 릴리스)와 비교했을 때 기존 0.x 시리즈 프로젝트의 코드를 변경해야 하는 몇 가지 " +"획기적인 변경 사항이 있습니다." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 +#: ../../source/how-to-upgrade-to-flower-next.rst:43 +msgid "Install update" +msgstr "업데이트 설치" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" +msgstr "다음은 pip 또는 Poetry를 사용하여 기존 설치를 Flower 1.0으로 업데이트하는 방법입니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "pip: add ``-U`` when installing." +msgstr "pip: 설치할 때 ``-U``를 추가합니다." 
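For the TensorFlow memory-growth hook discussed a few entries above (the reserved ``"on_actor_init_fn"`` key in ``actor_kwargs``), a hedged sketch of how it can be wired up; the ``enable_gpu_growth`` helper is a local illustration rather than a documented Flower utility::

    import flwr as fl
    import tensorflow as tf


    def enable_gpu_growth() -> None:
        # Stop TensorFlow from pre-allocating all GPU memory so that several
        # virtual clients can share one GPU inside their Ray actors.
        for gpu in tf.config.list_physical_devices("GPU"):
            tf.config.experimental.set_memory_growth(gpu, True)


    # Run the hook in the main process (where server-side evaluation happens)...
    enable_gpu_growth()

    # ...and in every actor spawned by the VirtualClientEngine.
    fl.simulation.start_simulation(
        client_fn=client_fn,  # assumed to exist, see the earlier sketch
        num_clients=100,
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=fl.server.strategy.FedAvg(),
        client_resources={"num_cpus": 1, "num_gpus": 0.25},
        actor_kwargs={"on_actor_init_fn": enable_gpu_growth},
    )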
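Similarly, the checkpoint-saving pattern described above (a ``FedAvg`` subclass that first calls the base-class ``aggregate_fit`` and then persists the aggregated weights) can be sketched as follows; the file name and the NumPy-based serialization are illustrative choices rather than the documentation's exact code::

    from typing import Dict, List, Optional, Tuple, Union

    import numpy as np

    import flwr as fl
    from flwr.common import FitRes, Parameters, Scalar, parameters_to_ndarrays
    from flwr.server.client_proxy import ClientProxy


    class SaveModelStrategy(fl.server.strategy.FedAvg):
        def aggregate_fit(
            self,
            server_round: int,
            results: List[Tuple[ClientProxy, FitRes]],
            failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
        ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
            # Let the built-in FedAvg do the actual aggregation first.
            aggregated_parameters, metrics = super().aggregate_fit(
                server_round, results, failures
            )
            if aggregated_parameters is not None:
                # Convert Parameters to a list of NumPy ndarrays and save them.
                ndarrays = parameters_to_ndarrays(aggregated_parameters)
                np.savez(f"round-{server_round}-weights.npz", *ndarrays)
            return aggregated_parameters, metrics

A PyTorch variant would convert the ndarrays into an ``OrderedDict``-based ``state_dict`` and call ``torch.save`` instead; loading then iterates over the saved files and restores the latest one.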
+ +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" +msgstr "" +"``python -m pip install -U flwr``(``start_server`` 및 ``start_client``를 " +"사용하는 경우)" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +msgid "" +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" +msgstr "``python -m pip install -U 'flwr[simulation]'``(``start_simulation`` 사용 시)" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +msgid "" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." +msgstr "" +"Poetry: ``pyproject.toml``에서 ``flwr`` dependency을 업데이트한 다음 다시 " +"설치하세요(``poetry 설치``를 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는" +" 것을 잊지 마세요)." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgstr "``flwr = \"^1.0.0\"``(``start_server`` 및 ``start_client`` 사용 시)" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +msgid "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" +msgstr "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` " +"(``start_simulation`` 사용 시)" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-next.rst:100 +msgid "Required changes" +msgstr "필수 변경 사항" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +msgid "The following breaking changes require manual updates." +msgstr "다음과 같은 주요 변경 사항에는 수동 업데이트가 필요합니다." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +msgid "General" +msgstr "일반" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" +msgstr "모든 전달인자를 위치 전달인자가 아닌 키워드 전달인자로 전달합니다. 
다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" +msgstr "" +"Flower 0.19 (위치 전달인자): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +msgid "" +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" +msgstr "" +"Flower 1.0 (키워드 전달인자): ``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "클라이언트" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" +msgstr "" +"``NumPyClient``의 서브클래스: ``def get_parameters(self):``를 ``def " +"get_parameters(self, config):``로 변경합니다" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" +msgstr "" +"``클라이언트``의 서브클래스: ``def get_parameters(self):``를 ``def " +"get_parameters(self, ins: GetParametersIns):``로 변경합니다" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "전략 / ``start_server`` / ``start_simulation``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" +msgstr "" +"Dictionary 대신 ``ServerConfig``를 ``start_server`` 및 ``start_simulation``에 " +"전달합니다. 다음은 예제입니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" +msgstr "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +msgid "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" +msgstr "" +"``start_simulation``의 ``num_rounds=1``을 새로운 ``config=ServerConfig(...)``로" +" 바꿉니다(이전 항목 참조)" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." +msgstr "" +"'start_server`` 호출에서 ``force_final_distributed_eval`` 매개변수를 제거합니다. 모든 " +"클라이언트에 대한 분산 평가는 마지막 훈련 라운드 후 평가를 위해 모든 클라이언트를 샘플링하도록 전략을 구성하여 활성화할 수 " +"있습니다." 
+ +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "매개변수/ndarray 변환 함수의 이름을 바꿉니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +msgid "" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +msgstr "" +"전략 초기화: 전략이 ``fraction_fit`` 및 ``fraction_evaluate``의 기본값에 의존하는 경우 " +"``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 수동 설정합니다. 전략을 수동으로 " +"생성하지 않는 프로젝트(전략 인스턴스를 전달하지 않고 ``start_server`` 또는 ``start_simulation``을 " +"호출하여)는 이제 ``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 설정하여 FedAvg를" +" 수동으로 초기화해야 합니다." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "기본 제공 전략 매개변수의 이름을 바꿉니다(예: ``FedAvg``):" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +msgid "" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +msgstr "" +"``rnd``의 이름을 ``server_round``로 바꿉니다. 이는 여러 메서드 및 함수(예: ``configure_fit``," +" ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate`` 및 " +"``evaluate_fn``)에 영향을 미칩니다." 
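Pulling the renames above together, a hedged sketch of a Flower 1.0 strategy definition (the fractions, client counts, and zero-initialised parameters are placeholders)::

    import numpy as np

    import flwr as fl
    from flwr.common import ndarrays_to_parameters  # was: weights_to_parameters

    # Flower 0.19 names: fraction_eval / min_eval_clients / eval_fn
    # Flower 1.0 names:  fraction_evaluate / min_evaluate_clients / evaluate_fn
    strategy = fl.server.strategy.FedAvg(
        fraction_fit=0.1,
        fraction_evaluate=0.1,
        min_fit_clients=10,
        min_evaluate_clients=10,
        min_available_clients=100,
        initial_parameters=ndarrays_to_parameters([np.zeros((3, 3))]),
    )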
+ +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "``server_round`` 및 ``config``를 ``evaluate_fn``에 추가합니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +msgid "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" +msgstr "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "Custom strategies" +msgstr "사용자 정의 전략" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +msgid "" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" +msgstr "" +"매개변수 ``failures``의 유형이 ``List[BaseException]``에서 " +"``List[Union[Tuple[ClientProxy], FitRes], " +"BaseException]]``(``aggregate_fit``에서) 및 ``List[Union[Tuple[ClientProxy]," +" EvaluateRes], BaseException]]``(``aggregate_evaluate``)로 변경되었습니다" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "이제 ``Strategy`` 메서드 ``evaluate``는 현재 federated 학습/평가 라운드를 첫 번째 파라미터로 받습니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +msgid "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "Optional improvements" +msgstr "선택적 개선 사항" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" +msgstr "위의 필수 변경 사항과 함께 방금 가능한 여러 가지 잠재적 개선 사항이 있습니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." +msgstr "" +"``Client`` 또는 ``NumPyClient``의 서브 클래스에서 \"placeholder\" 메서드를 제거합니다. 예를 들어" +" 서버 측 평가를 사용하는 경우 ``evaluate``의 빈 자리 표시자 구현은 더 이상 필요하지 않습니다." 
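The new ``evaluate_fn`` signature shown above, written out as a hedged sketch; the returned loss and metrics are placeholders, and a real implementation would evaluate the received parameters on a server-side dataset::

    from typing import Dict, Optional, Tuple

    from flwr.common import NDArrays, Scalar


    # Flower 0.19: def evaluate(parameters: NDArrays) -> ...
    # Flower 1.0: the current round and a config dict are passed in as well.
    def evaluate_fn(
        server_round: int,
        parameters: NDArrays,
        config: Dict[str, Scalar],
    ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
        loss = 0.0
        return loss, {"accuracy": 0.0}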
+ +#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" +msgstr "" +"``start_simulation``을 통해 라운드 타임아웃을 구성합니다: ``start_simulation(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 +#: ../../source/how-to-upgrade-to-flower-next.rst:317 +msgid "Further help" +msgstr "추가 도움말" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +msgid "" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." +msgstr "" +"대부분의 공식 ``Flower code 예제 " +"`_는 이미 Flower 1.0으로 " +"업데이트되어 있으며, Flower 1.0 API를 사용하기 위한 참고 자료로 사용할 수 있습니다. 더 궁금한 점이 있다면 ``플라워" +" 슬랙 `_에 가입하여 ``#questions`` 채널을 이용하세요." + +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +msgid "Upgrade to Flower Next" +msgstr "Flower Next 업그레이드" + +#: ../../source/how-to-upgrade-to-flower-next.rst:4 +msgid "" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." +msgstr "" +"Flower에서 Flower Next로의 업데이트를 위한 이동 가이드에 오신 것을 환영합니다! 이 가이드는 숙련된 사용자든 이제 막" +" 시작한 사용자든 상관없이 기존 설정을 원활하게 전환하여 버전 1.8부터 Flower Next의 최신 기능 및 개선 사항을 활용할 " +"수 있도록 도와드립니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:9 +msgid "" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." +msgstr "" +"이 가이드에서는 Flower Next의 *호환성 레이어*를 사용하여 최소한의 코드 변경으로 ``1.8`` 이전의 Flower 코드를" +" 재사용하는 방법을 보여줍니다. 다른 가이드에서는 순수한 Flower Next API로 Flower Next를 end-to-end로" +" 실행하는 방법을 보여드리겠습니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:13 +msgid "Let's dive in!" +msgstr "자세히 알아봅시다!" + +#: ../../source/how-to-upgrade-to-flower-next.rst:48 +msgid "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "기존에 설치된 Flower to Flower Next를 ``pip``으로 업데이트하는 방법은 다음과 같습니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:54 +msgid "or if you need Flower Next with simulation:" +msgstr "또는 시뮬레이션이 포함된 Flower Next가 필요한 경우:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:61 +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" +msgstr "``requirements.txt``에서 다음 버전 제약 조건을 설정했는지 확인하세요" + +#: ../../source/how-to-upgrade-to-flower-next.rst:71 +msgid "or ``pyproject.toml``:" +msgstr "또는 ``pyproject.toml``:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:82 +msgid "Using Poetry" +msgstr "Poetry 사용" + +#: ../../source/how-to-upgrade-to-flower-next.rst:84 +msgid "" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." +msgstr "" +"``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음 다시 설치하세요(``poetry install``을 " +"실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는 것을 잊지 마세요)." 
+ +#: ../../source/how-to-upgrade-to-flower-next.rst:86 +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:102 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" +msgstr "" +"Flower Next에서는 *infrastructure*와 *application layers*가 분리되었습니다. 코드에서 " +"``start_client()``를 통해 클라이언트를 시작하는 대신, 명령줄을 통해 |clientapp_link|_를 생성하여 " +"시작합니다. 코드에서 ``start_server()``를 통해 서버를 시작하는 대신 |serverapp_link|_를 생성하고 " +"명령줄을 통해 서버를 시작합니다. 서버와 클라이언트의 장기 실행 컴포넌트를 SuperLink와 SuperNode라고 합니다. 수동 " +"업데이트가 필요하지 않고 기존 방식과 Flower Next 방식 모두에서 프로젝트를 실행할 수 있는 non-breaking 변경 " +"사항은 다음과 같습니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:109 +msgid "|clientapp_link|_" +msgstr "|clientapp_link|_" + +#: ../../source/how-to-upgrade-to-flower-next.rst:110 +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" +msgstr "" +"|clientapp_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 래핑하세요. 다음은 " +"예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +msgid "|serverapp_link|_" +msgstr "|serverapp_link|_" + +#: ../../source/how-to-upgrade-to-flower-next.rst:133 +msgid "" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" +msgstr "" +"서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략을 |serverapp_link|_로 " +"래핑하세요. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:154 +msgid "Deployment" +msgstr "배포" + +#: ../../source/how-to-upgrade-to-flower-next.rst:155 +msgid "" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." +msgstr "" +"실행하기 전에 |flowernext_superlink_link|_를 사용하여 ``SuperLink``를 실행한 후 " +"|flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 순서대로 " +"실행합니다. 'client.py'와 'server.py'를 Python 스크립트로 실행할 필요는 없습니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:158 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):" + +#: ../../source/how-to-upgrade-to-flower-next.rst:174 +msgid "" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." +msgstr "" +"다음은 HTTPS로 시작하는 또 다른 예제입니다. '`--ssl-ca-certfile``, '`--ssl-certfile``, " +"'`--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 및 서버 개인 키)의 경로를 전달합니다." 
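A hedged sketch of the compatibility-layer wrapping described above: an existing client and strategy reused through ``ClientApp`` and ``ServerApp``. The ``FlowerClient`` stand-in is illustrative::

    import flwr as fl
    from flwr.client import ClientApp, NumPyClient
    from flwr.server import ServerApp, ServerConfig


    class FlowerClient(NumPyClient):
        """Stand-in for an existing pre-1.8 client implementation."""

        def get_parameters(self, config):
            return []


    def client_fn(cid: str):
        return FlowerClient().to_client()


    # Instead of start_client(...): wrap the client function in a ClientApp.
    app = ClientApp(client_fn=client_fn)

    # Instead of start_server(...): wrap the existing strategy in a ServerApp.
    server = ServerApp(
        config=ServerConfig(num_rounds=3),
        strategy=fl.server.strategy.FedAvg(),
    )

    # Both objects are then launched from the command line (SuperLink/SuperNode
    # tooling or `flower-simulation`) rather than from Python code.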
+ +#: ../../source/how-to-upgrade-to-flower-next.rst:201 +msgid "Simulation in CLI" +msgstr "CLI 시뮬레이션" + +#: ../../source/how-to-upgrade-to-flower-next.rst:202 +msgid "" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" +msgstr "" +"기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하세요. 더 이상 " +"|startsim_link|_를 사용할 필요가 없습니다. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:232 +msgid "" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" +msgstr "" +"CLI에서 |flower_simulation_link|_를 실행하고 Python 스크립트를 실행하는 대신 코드에서 " +"``server_app`` / ``client_app`` 개체를 가리키세요. 다음은 예제입니다(``server_app`` 및 " +"``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):" + +#: ../../source/how-to-upgrade-to-flower-next.rst:249 +msgid "" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" +msgstr "" +"|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-config`` " +"명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설정하세요. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:275 +msgid "Simulation in a Notebook" +msgstr "Notebook에서 시뮬레이션" + +#: ../../source/how-to-upgrade-to-flower-next.rst:276 +msgid "" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" +msgstr "notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:319 +msgid "" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." +msgstr "" +"일부 공식 ``Flower 코드 예제 `_는 이미 플라워 넥스트에 " +"업데이트되어 있으므로 플라워 넥스트 API를 사용하는 데 참고할 수 있습니다. 더 궁금한 점이 있다면 ``플라워 슬랙 " +"`_에 가입하고 ``#questions`` 채널을 이용하세요. 또한, " +"``Flower Discuss `_에 참여하여 질문에 대한 답변을 확인하거나 다른" +" 사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다." + +#: ../../source/how-to-upgrade-to-flower-next.rst:325 +msgid "Important" +msgstr "중요" + +#: ../../source/how-to-upgrade-to-flower-next.rst:328 +msgid "" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" +msgstr "" +"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " +"언제든지 공유해 주세요!" + +#: ../../source/how-to-upgrade-to-flower-next.rst:334 +msgid "Happy migrating! 🚀" +msgstr "행복한 마이그레이션! 🚀" + +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" +msgstr "기본 제공 모드 사용" + +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "**참고: 이 튜토리얼은 실험적인 기능을 다룹니다. 
기능 및 인터페이스는 향후 버전에서 변경될 수 있습니다.**"
+
+#: ../../source/how-to-use-built-in-mods.rst:6
+msgid ""
+"In this tutorial, we will learn how to utilize built-in mods to augment "
+"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) "
+"allow us to perform operations before and after a task is processed in "
+"the ``ClientApp``."
+msgstr ""
+"이 튜토리얼에서는 내장 모드를 활용하여 ``ClientApp``의 동작을 보강하는 방법을 배우겠습니다. "
+"Mods(Modifiers라고도 함)를 사용하면 ``ClientApp``에서 작업이 처리되기 전과 후에 작업을 수행할 수 있습니다."
+
+#: ../../source/how-to-use-built-in-mods.rst:9
+msgid "What are Mods?"
+msgstr "Mods란 무엇인가요?"
+
+#: ../../source/how-to-use-built-in-mods.rst:11
+msgid ""
+"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate "
+"or inspect the incoming ``Message`` and the resulting outgoing "
+"``Message``. The signature for a ``Mod`` is as follows:"
+msgstr ""
+"Mod는 ``ClientApp``을 감싸는 콜러블입니다. 들어오는 ``Message``와 그 결과로 나가는 ``Message``를 "
+"조작하거나 검사할 수 있습니다. ``Mod``의 시그니처는 다음과 같습니다:"
+
+#: ../../source/how-to-use-built-in-mods.rst:18
+msgid "A typical mod function might look something like this:"
+msgstr "일반적인 mod 함수는 다음과 같은 모습일 수 있습니다:"
+
+#: ../../source/how-to-use-built-in-mods.rst:31
+msgid "Using Mods"
+msgstr "Mods 사용"
+
+#: ../../source/how-to-use-built-in-mods.rst:33
+msgid "To use mods in your ``ClientApp``, you can follow these steps:"
+msgstr "``ClientApp``에서 mods를 사용하려면 다음 단계를 따르세요:"
+
+#: ../../source/how-to-use-built-in-mods.rst:36
+msgid "1. Import the required mods"
+msgstr "1. 필요한 mods를 가져옵니다"
+
+#: ../../source/how-to-use-built-in-mods.rst:38
+msgid "First, import the built-in mod you intend to use:"
+msgstr "먼저 사용하려는 기본 제공 mod를 가져옵니다:"
+
+#: ../../source/how-to-use-built-in-mods.rst:46
+msgid "2. Define your client function"
+msgstr "2. 클라이언트 함수 정의"
+
+#: ../../source/how-to-use-built-in-mods.rst:48
+msgid ""
+"Define your client function (``client_fn``) that will be wrapped by the "
+"mod(s):"
+msgstr "mod(s)로 래핑할 클라이언트 함수(``client_fn``)를 정의합니다:"
+
+#: ../../source/how-to-use-built-in-mods.rst:57
+msgid "3. Create the ``ClientApp`` with mods"
+msgstr "3. mods로 ``ClientApp``을 생성합니다"
+
+#: ../../source/how-to-use-built-in-mods.rst:59
+msgid ""
+"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` "
+"argument. The order in which you provide the mods matters:"
+msgstr ""
+"``ClientApp``을 생성하고 mods를 ``mods`` 인수에 목록으로 전달합니다. mods를 제공하는 순서가 "
+"중요합니다:"
+
+#: ../../source/how-to-use-built-in-mods.rst:72
+msgid "Order of execution"
+msgstr "실행 순서"
+
+#: ../../source/how-to-use-built-in-mods.rst:74
+msgid ""
+"When the ``ClientApp`` runs, the mods are executed in the order they are "
+"provided in the list:"
+msgstr "``ClientApp``이 실행되면 목록에 제공된 순서대로 mods가 실행됩니다:"
+
+#: ../../source/how-to-use-built-in-mods.rst:76
+msgid "``example_mod_1`` (outermost mod)"
+msgstr "``example_mod_1`` (가장 바깥쪽 mod)"
+
+#: ../../source/how-to-use-built-in-mods.rst:77
+msgid "``example_mod_2`` (next mod)"
+msgstr "``example_mod_2`` (다음 mod)"
+
+#: ../../source/how-to-use-built-in-mods.rst:78
+msgid ""
+"Message handler (core function that handles the incoming ``Message`` and "
+"returns the outgoing ``Message``)"
+msgstr "Message handler(들어오는 ``Message``를 처리하고 나가는 ``Message``를 반환하는 핵심 함수)"
+
+#: ../../source/how-to-use-built-in-mods.rst:79
+msgid "``example_mod_2`` (on the way back)"
+msgstr "``example_mod_2`` (돌아오는 길에)"
+
+#: ../../source/how-to-use-built-in-mods.rst:80
+msgid "``example_mod_1`` (outermost mod on the way back)"
+msgstr "``example_mod_1`` (돌아오는 길의 가장 바깥쪽 mod)"
+
+#: ../../source/how-to-use-built-in-mods.rst:82
+msgid ""
+"Each mod has a chance to inspect and modify the incoming ``Message`` "
+"before passing it to the next mod, and likewise with the outgoing "
+"``Message`` before returning it up the stack."
+msgstr ""
+"각 mod는 다음 mod로 전달하기 전에 들어오는 ``Message``를 검사하고 수정할 기회가 있으며, 스택 위로 반환하기 전에 "
+"나가는 ``Message``도 마찬가지로 검사하고 수정할 수 있습니다."
+
+#: ../../source/how-to-use-built-in-mods.rst:87
+msgid ""
+"By following this guide, you have learned how to effectively use mods to "
+"enhance your ``ClientApp``'s functionality. Remember that the order of "
+"mods is crucial and affects how the input and output are processed."
+msgstr ""
+"이 가이드를 따라 mods를 효과적으로 사용하여 ``ClientApp``의 기능을 향상시키는 방법을 배웠습니다. mods 순서는 "
+"매우 중요하며 입력과 출력이 처리되는 방식에 영향을 미친다는 점을 기억하세요."
+
+#: ../../source/how-to-use-built-in-mods.rst:89
+msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!"
+msgstr "Mods를 통해 더욱 강력하고 유연한 ``ClientApp``을 구축해 보세요!"
+
+#: ../../source/how-to-use-differential-privacy.rst:2
+msgid "Use Differential Privacy"
+msgstr "차분 개인정보 보호 사용"
+
+#: ../../source/how-to-use-differential-privacy.rst:3
+msgid ""
+"This guide explains how you can utilize differential privacy in the "
+"Flower framework. If you are not yet familiar with differential privacy, "
+"you can refer to :doc:`explanation-differential-privacy`."
+msgstr ""
+"이 가이드에서는 Flower 프레임워크에서 차분 개인정보 보호 기능을 활용하는 방법을 설명합니다. 차분 개인정보 보호에 대해 아직 "
+"익숙하지 않은 경우 :doc:`explanation-differential-privacy`를 참조하세요."
+
+#: ../../source/how-to-use-differential-privacy.rst:7
+msgid ""
+"Differential Privacy in Flower is in a preview phase. If you plan to use "
+"these features in a production environment with sensitive data, feel free"
+" contact us to discuss your requirements and to receive guidance on how "
+"to best use these features."
+msgstr ""
+"Flower의 차분 개인정보 보호는 현재 프리뷰 단계에 있습니다. 민감한 데이터가 있는 프로덕션 환경에서 이러한 기능을 사용할 "
+"계획이라면 언제든지 문의하여 요구 사항을 논의하고 이러한 기능을 가장 잘 사용하는 방법에 대한 안내를 받으세요."
+
+#: ../../source/how-to-use-differential-privacy.rst:12
+#, fuzzy
+msgid ""
+"This approach consists of two separate phases: clipping of the updates "
+"and adding noise to the aggregated model. For the clipping phase, Flower "
+"framework has made it possible to decide whether to perform clipping on "
+"the server side or the client side."
+msgstr "" +"이 접근 방식은 업데이트 클리핑과 집계된 모델에 노이즈 추가라는 두 가지 단계로 구성됩니다. 클리핑 단계의 경우, Flower " +"프레임워크는 클리핑을 서버 측에서 수행할지 클라이언트 측에서 수행할지 결정할 수 있도록 했습니다." + +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" +"**Server-side Clipping**: 이 방식은 서버가 모든 클라이언트의 업데이트에 대해 균일한 클리핑을 적용하고 클리핑 " +"값에 대한 통신 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 모든 클라이언트에 대해 클리핑 작업을 수행해야 하기 때문에 " +"서버의 계산 부하가 증가한다는 단점도 있습니다." + +#: ../../source/how-to-use-differential-privacy.rst:16 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." +msgstr "" +"**Client-side Clipping**: 이 방식은 서버의 계산 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 서버가 " +"클리핑 프로세스에 대한 통제력이 떨어지기 때문에 centralized 제어가 부족하다는 단점도 있습니다." + +#: ../../source/how-to-use-differential-privacy.rst:21 +msgid "Server-side Clipping" +msgstr "서버 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:22 +msgid "" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." +msgstr "" +"서버 측 클리핑이 있는 중앙 DP의 경우, 실제 :code:`Strategy` 인스턴스를 감싸는 래퍼 역할을 하는 두 개의 " +":code:`Strategy` 클래스가 있습니다(예: :code:`FedAvg`). 두 개의 래퍼 클래스는 고정 및 적응형 클리핑을" +" 위한 :code:`DifferentialPrivacyServerSideFixedClipping`과 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`입니다." + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" +msgstr "서버 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." +msgstr "" +"아래 코드 샘플은 :code:`FedAvg` 전략이 " +":code:`DifferentialPrivacyServerSideFixedClipping` 래퍼 클래스를 사용하여 서버 측 고정 " +"클리핑을 사용할 수 있도록 합니다. 해당 입력 매개변수를 조정하여 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`과 동일한 접근 방식을 사용할 수 " +"있습니다." + +#: ../../source/how-to-use-differential-privacy.rst:52 +msgid "Client-side Clipping" +msgstr "클라이언트 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
+msgstr "" +"클라이언트 측 클리핑이 있는 중앙 DP의 경우 서버는 각 라운드마다 선택한 클라이언트에 클리핑 값을 보냅니다. 클라이언트는 기존 " +"Flower :code:`Mods`를 사용하여 클리핑을 수행할 수 있습니다. 고정 및 적응형 클라이언트 측 클리핑에는 두 가지 " +"모드를 사용할 수 있습니다: :code:`fixedclipping_mod` 및 :code:`adaptiveclipping_mod`와" +" 해당 서버 측 래퍼 :code:`DifferentialPrivacyClientSideFixedClipping` 및 " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`이 있습니다." + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" +msgstr "클라이언트 측 클리핑" + +#: ../../source/how-to-use-differential-privacy.rst:63 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" +msgstr "" +"아래 코드 샘플은 :code:`FedAvg` 전략이 클라이언트 측 고정 클리핑과 함께 차분 프라이버시를 사용할 수 있도록 " +":code:`DifferentialPrivacyClientSideFixedClipping` 래퍼 클래스와 클라이언트에서 " +":code:`fixedclipping_mod`를 모두 사용하도록 합니다:" + +#: ../../source/how-to-use-differential-privacy.rst:80 +msgid "" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" +msgstr "" +"서버 측 전략 래퍼 외에도 클라이언트 측 클리핑을 수행하려면 :code:`ClientApp`이 일치하는 " +":code:`fixedclipping_mod`를 구성해야 합니다:" + +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." +msgstr "" +"로컬 차분 프라이버시(DP)를 활용하고 클라이언트 모델 파라미터를 서버로 전송하기 전에 노이즈를 추가하려면 `LocalDpMod`를" +" 사용하면 됩니다. 클리핑 노멀 값, 감도, 엡실론, 델타 등의 하이퍼파라미터를 설정해야 합니다." + +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "로컬 DP mod" + +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "다음은 :code:`LocalDpMod`를 사용하는 방법을 보여주는 코드 예시입니다:" + +#: ../../source/how-to-use-differential-privacy.rst:122 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "" +"여러 개의 수정자를 사용할 때는 수정자, 특히 매개변수를 수정하는 수정자의 순서가 중요하다는 점에 유의하세요. 일반적으로 차분 " +"프라이버시(DP) 수정자는 매개변수에서 가장 마지막에 작동해야 합니다." + +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" +msgstr "Privacy Engines을 사용한 로컬 훈련" + +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." +msgstr "" +"클라이언트 측에서 로컬 모델을 훈련하는 동안 데이터 인스턴스 수준의 개인 정보 보호를 보장하려면 Opacus 및 TensorFlow" +" Privacy와 같은 개인 정보 보호 엔진을 활용하는 것을 고려하세요. 이러한 엔진과 함께 Flower를 사용하는 예제는 " +"Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)를 참조하세요." 
+
+#: ../../source/how-to-use-strategies.rst:2
+msgid "Use strategies"
+msgstr "전략 사용하기"
+
+#: ../../source/how-to-use-strategies.rst:4
+msgid ""
+"Flower allows full customization of the learning process through the "
+":code:`Strategy` abstraction. A number of built-in strategies are "
+"provided in the core framework."
+msgstr ""
+"Flower는 :code:`Strategy` 추상화를 통해 학습 과정을 완전히 사용자 정의할 수 있습니다. 핵심 "
+"프레임워크에는 여러 가지 기본 제공 전략이 제공됩니다."
+
+#: ../../source/how-to-use-strategies.rst:6
+msgid ""
+"There are three ways to customize the way Flower orchestrates the "
+"learning process on the server side:"
+msgstr "서버 측에서 Flower가 학습 과정을 조율하는 방식을 사용자 지정하는 방법에는 세 가지가 있습니다:"
+
+#: ../../source/how-to-use-strategies.rst:8
+msgid "Use an existing strategy, for example, :code:`FedAvg`"
+msgstr "기존 전략(예: :code:`FedAvg`)을 사용합니다"
+
+#: ../../source/how-to-use-strategies.rst:9
+#: ../../source/how-to-use-strategies.rst:40
+msgid "Customize an existing strategy with callback functions"
+msgstr "콜백 함수로 기존 전략 사용자 지정"
+
+#: ../../source/how-to-use-strategies.rst:10
+#: ../../source/how-to-use-strategies.rst:87
+msgid "Implement a novel strategy"
+msgstr "새로운 전략 구현"
+
+#: ../../source/how-to-use-strategies.rst:14
+msgid "Use an existing strategy"
+msgstr "기존 전략 사용"
+
+#: ../../source/how-to-use-strategies.rst:16
+msgid ""
+"Flower comes with a number of popular federated learning strategies "
+"built-in. A built-in strategy can be instantiated as follows:"
+msgstr "Flower에는 여러 가지 인기 있는 연합 학습 전략이 기본으로 제공됩니다. 기본 제공 전략은 다음과 같이 인스턴스화할 수 있습니다:"
+
+#: ../../source/how-to-use-strategies.rst:25
+msgid ""
+"This creates a strategy with all parameters left at their default values "
+"and passes it to the :code:`start_server` function. It is usually "
+"recommended to adjust a few parameters during instantiation:"
+msgstr ""
+"이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 생성되어 :code:`start_server` 함수에 전달됩니다. 일반적으로 "
+"인스턴스화 중에 몇 가지 매개변수를 조정하는 것이 좋습니다:"
+
+#: ../../source/how-to-use-strategies.rst:42
+msgid ""
+"Existing strategies provide several ways to customize their behaviour. "
+"Callback functions allow strategies to call user-provided code during "
+"execution."
+msgstr ""
+"기존 전략은 동작을 사용자 지정하는 여러 가지 방법을 제공합니다. 콜백 함수를 사용하면 전략이 실행 중에 사용자가 제공한 코드를 "
+"호출할 수 있습니다."
+
+#: ../../source/how-to-use-strategies.rst:45
+msgid "Configuring client fit and client evaluate"
+msgstr "클라이언트 fit 및 클라이언트 evaluate 구성"
+
+#: ../../source/how-to-use-strategies.rst:47
+msgid ""
+"The server can pass new configuration values to the client each round by "
+"providing a function to :code:`on_fit_config_fn`. The provided function "
+"will be called by the strategy and must return a dictionary of "
+"configuration key values pairs that will be sent to the client. It must "
+"return a dictionary of arbitrary configuration values :code:`client.fit`"
+" and :code:`client.evaluate` functions during each round of federated "
+"learning."
+msgstr ""
+"서버는 매 라운드마다 새로운 설정 값을 클라이언트에 전달하기 위해 :code:`on_fit_config_fn`에 함수를 제공할 수 "
+"있습니다. 제공된 함수는 전략에 의해 호출되며 클라이언트에 전송될 구성 키-값 쌍의 dictionary를 반환해야 합니다. 반환된 "
+"임의의 구성 값 dictionary는 연합 학습의 각 라운드 동안 :code:`client.fit` 및 "
+":code:`client.evaluate` 함수에 전달됩니다."
+
+#: ../../source/how-to-use-strategies.rst:75
+#, fuzzy
+msgid ""
+"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration "
+"values from server to client, and potentially change these values each "
+"round, for example, to adjust the learning rate. 
The client will receive " +"the dictionary returned by the :code:`on_fit_config_fn` in its own " +":code:`client.fit()` function." +msgstr "" +":code:`on_fit_config_fn`은 서버에서 클라이언트로 임의의 구성 값을 전달하고, 예를 들어 학습 속도를 조정하기 " +"위해 매 라운드마다 이 값을 잠재적으로 변경하는 데 사용할 수 있습니다. 클라이언트는 자체 :code:`client.fit()` " +"함수에서 :code:`on_fit_config_fn`이 반환한 dictionary를 받습니다." + +#: ../../source/how-to-use-strategies.rst:78 +msgid "" +"Similar to :code:`on_fit_config_fn`, there is also " +":code:`on_evaluate_config_fn` to customize the configuration sent to " +":code:`client.evaluate()`" +msgstr "" +":code:`on_fit_config_fn`과 유사하게, :code:`client.evaluate()`로 전송되는 구성을 사용자 " +"지정하는 :code:`on_evaluate_config_fn`도 있습니다" + +#: ../../source/how-to-use-strategies.rst:81 +msgid "Configuring server-side evaluation" +msgstr "서버 측 평가 구성" + +#: ../../source/how-to-use-strategies.rst:83 +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to :code:`evaluate_fn`." +msgstr "서버 측 평가는 :code:`evaluate_fn`에 평가 함수를 전달하여 활성화할 수 있습니다." + +#: ../../source/how-to-use-strategies.rst:89 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +msgstr "" +"완전한 사용자 지정 전략을 작성하는 것은 조금 더 복잡하지만 유연성이 가장 뛰어납니다. 자세한 내용은 `Implementing " +"Strategies `_ 가이드를 참조하세요." + +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "튜토리얼" + +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/index.rst:75 ../../source/index.rst:79 +msgid "How-to guides" +msgstr "사용 방법 가이드" + +#: ../../source/index.rst:100 +msgid "Legacy example guides" +msgstr "레거시 예제 가이드" + +#: ../../source/index.rst:108 ../../source/index.rst:112 +msgid "Explanations" +msgstr "설명" + +#: None:-1 +msgid "API reference" +msgstr "API 참조" + +#: ../../source/index.rst:138 +msgid "Reference docs" +msgstr "참조 문서" + +#: ../../source/index.rst:154 +msgid "Contributor tutorials" +msgstr "기여자 튜토리얼" + +#: ../../source/index.rst:161 +msgid "Contributor how-to guides" +msgstr "기여자 사용법 가이드" + +#: ../../source/index.rst:173 +msgid "Contributor explanations" +msgstr "기여자 설명" + +#: ../../source/index.rst:179 +msgid "Contributor references" +msgstr "기여자 참조" + +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "연합 학습을 위한 Python 개발을 쉽게 할 수 있는 주요 Flower 프레임워크의 설명서를 확인하세요." + +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" +msgstr "플라워 프레임워크 문서" + +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." +msgstr "Flower 문서에 오신 것을 환영합니다. Flower `_는 편한 연합 학습 프레임워크입니다." + +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "Flower 커뮤니티 가입하기" + +#: ../../source/index.rst:13 +msgid "" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." +msgstr "Flower 커뮤니티는 연구원, 엔지니어, 학생, 전문가, 학자 및 기타 애호가들로 구성된 편한 그룹으로 빠르게 성장하고 있습니다." + +#: ../../source/index.rst:15 +msgid "Join us on Slack" +msgstr "Slack에 가입하세요" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "Flower 프레임워크" + +#: ../../source/index.rst:25 +msgid "" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. 
One of Flower's design goals was to make this simple. Read on to" +" learn more." +msgstr "" +"이 사용자 가이드는 Flower를 사용해 기존 머신 러닝 워크로드를 연합된 환경으로 가져오고자 하는 연구자와 개발자를 대상으로 " +"합니다. Flower의 설계 목표 중 하나는 이를 간단하게 만드는 것이었습니다. 자세히 알아보려면 계속 읽어보세요." + +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "튜토리얼" + +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." +msgstr "학습 중심의 연합 학습 튜토리얼 시리즈로, 시작하기에 가장 좋은 곳입니다." + +#: ../../source/index.rst:62 +#, fuzzy +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +msgstr "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`🤗 Transformers" +" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " +"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " +":doc:`Android ` | :doc:`iOS `" + +#: ../../source/index.rst:64 +msgid "We also made video tutorials for PyTorch:" +msgstr "파이토치용 동영상 튜토리얼도 만들었습니다:" + +#: ../../source/index.rst:69 +msgid "And TensorFlow:" +msgstr "그리고 TensorFlow도:" + +#: ../../source/index.rst:77 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." +msgstr "문제 중심의 방법 가이드는 특정 목표를 달성하는 방법을 단계별로 보여줍니다." + +#: ../../source/index.rst:110 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." +msgstr "이해 중심의 개념 가이드에서는 Flower와 협업 AI의 주요 주제와 기본 아이디어를 설명하고 토론합니다." + +#: ../../source/index.rst:121 +msgid "References" +msgstr "참조" + +#: ../../source/index.rst:123 +msgid "Information-oriented API reference and other reference material." +msgstr "정보 지향 API 참조 및 기타 참고 자료." + +#: ../../source/index.rst:132::1 +msgid ":py:obj:`flwr `\\" +msgstr ":py:obj:`flwr `\\" + +#: ../../source/index.rst:132::1 flwr:1 of +msgid "Flower main package." +msgstr "Flower 메인 패키지." + +#: ../../source/index.rst:149 +msgid "Contributor docs" +msgstr "기여자 문서" + +#: ../../source/index.rst:151 +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." +msgstr "Flower 커뮤니티는 여러분의 기여를 환영합니다. 다음 문서는 그 과정에서 도움을 드리기 위한 문서입니다." + +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "Flower CLI 참조" + +#: ../../source/ref-api-cli.rst:7 +#, fuzzy +msgid "flwr CLI" +msgstr "Flower 클라이언트." + +#: ../../flwr:1 +#, fuzzy +msgid "flwr is the Flower command line interface." +msgstr "Flower ClientProxy 인스턴스 등록 해제." + +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Options" +msgstr "해결법" + +#: ../../flwr:1 +#, fuzzy +msgid "Install completion for the current shell." +msgstr "현재 실행에 대한 식별자입니다." + +#: ../../flwr:1 +msgid "" +"Show completion for the current shell, to copy it or customize the " +"installation." +msgstr "" + +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" + +#: ../../flwr build:1 +msgid "" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" +msgstr "" + +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." 
+msgstr "" + +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" +msgstr "" + +#: ../../flwr install:1 +#, fuzzy +msgid "Install a Flower App Bundle." +msgstr "Flower 설치" + +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" +msgstr "" + +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" +msgstr "" + +#: ../../flwr install:1 +msgid "" +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" +msgstr "" + +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" +msgstr "" + +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" +msgstr "" + +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" +msgstr "" + +#: ../../flwr install:1 +msgid "The desired install path." +msgstr "" + +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "빌드 전달인자" + +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "선택적 개선 사항" + +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" + +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" + +#: ../../flwr log +msgid "default" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "``True``" +msgstr "``DISTRO``" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "빌드 전달인자" + +#: ../../flwr new:1 +#, fuzzy +msgid "Create new Flower App." +msgstr "Flower 서버를 실행하세요." + +#: ../../flwr new:1 +msgid "The ML framework to use" +msgstr "" + +#: ../../flwr new +#, fuzzy +msgid "options" +msgstr "해결법" + +#: ../../flwr new:1 +msgid "" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" +msgstr "" + +#: ../../flwr new:1 +msgid "The Flower username of the author" +msgstr "" + +#: ../../flwr run:1 +#, fuzzy +msgid "Run Flower App." +msgstr "Flower 서버를 실행하세요." + +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" +msgstr "" + +#: ../../flwr run:1 +msgid "" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" +msgstr "" + +#: ../../flwr run:1 +msgid "" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." +msgstr "" + +#: ../../source/ref-api-cli.rst:16 +msgid "flower-simulation" +msgstr "flower 시뮬레이션" + +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower 초연결" + +#: ../../source/ref-api-cli.rst:36 +#, fuzzy +msgid "flower-supernode" +msgstr "Flower SuperNode" + +#: ../../source/ref-api-cli.rst:46 +msgid "flower-server-app" +msgstr "flower 서버 프로그램" + +#: ../../source/ref-api-cli.rst:49 +msgid "" +"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " +"longer supports passing a reference to a `ServerApp` attribute. Instead, " +"you need to pass the path to Flower app via the argument :code:`--app`. " +"This is the path to a directory containing a `pyproject.toml`. You can " +"create a valid Flower app by executing :code:`flwr new` and following the" +" prompt." 
+msgstr "" + +#: ../../source/ref-api-cli.rst:62 +#, fuzzy +msgid "flower-superexec" +msgstr "flower 초연결" + +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" +msgstr "flwr" + +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" +msgstr "Modules" + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.client `\\" +msgstr ":py:obj:`flwr.client `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." +msgstr "Flower 클라이언트." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.common `\\" +msgstr ":py:obj:`flwr.common `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "서버와 클라이언트 간에 공유되는 공통 구성 요소입니다." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.server `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." +msgstr "Flower 서버." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.simulation `\\" +msgstr ":py:obj:`flwr.simulation `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." +msgstr "Flower 시뮬레이션." + +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "클라이언트" + +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" +msgstr "함수" + +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "Flower 서버에 연결되는 Flower 클라이언트 노드를 시작합니다." + +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" +msgstr "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" + +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." +msgstr "gRPC 서버에 연결되는 Flower NumPyClient를 시작합니다." + +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" +msgstr "클래스" + +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "Flower 클라이언트를 위한 추상 베이스 클래스입니다." 
+ +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of +msgid "Flower ClientApp." +msgstr "Flower ClientApp." + +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr ":py:obj:`NumPyClient `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." +msgstr "NumPy를 사용하는 Flower 클라이언트를 위한 추상 베이스 클래스입니다." + +#: ../../source/ref-api/flwr.client.rst:50::1 +#, fuzzy +msgid ":py:obj:`flwr.client.mod `\\" +msgstr ":py:obj:`flwr.client.mod `\\" + +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +msgid "Flower Built-in Mods." +msgstr "Flower 내장 모드." + +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "Bases: :py:class:`~abc.ABC`" + +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" +msgstr "메소드" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr ":py:obj:`evaluate `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 평가합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr ":py:obj:`fit `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 구체화합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." +msgstr "이 클라이언트에서 실행 컨텍스트를 가져옵니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "현재 로컬 모델 파라미터를 반환합니다." 
+ +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr ":py:obj:`get_properties `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "클라이언트의 속성 집합을 반환합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." +msgstr "이 클라이언트에 실행 컨텍스트를 적용합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." +msgstr "클라이언트(자체)를 반환합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" +msgstr "속성" + +#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" + +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "파라미터" + +#: flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." +msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 평가 프로세스를 사용자 지정하는 데 사용되는 구성 값 사전이 포함된 평가 지침입니다." 
+ +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" +msgstr "반환" + +#: flwr.client.client.Client.evaluate:8 of +msgid "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." +msgstr "로컬 데이터 세트의 손실 및 평가에 사용된 로컬 데이터 예제 수와 같은 기타 세부 정보가 포함된 평가 결과입니다." + +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" +msgstr "반환 타입" + +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 학습 프로세스를 사용자 지정하는 데 사용되는 구성 값 사전이 포함된 학습 지침입니다." 
+ +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "업데이트된 매개변수와 훈련에 사용된 로컬 훈련 예제 수와 같은 기타 세부 정보가 포함된 훈련 결과입니다." + +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "구성 값 dictionary이 포함된 서버에서 받은 매개변수 가져오기 명령어입니다." + +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." +msgstr "현재 로컬 모델 파라미터입니다." + +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "구성 값 dictionary이 포함된 서버로부터 받은 속성 가져오기 명령입니다." + +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "현재 클라이언트 속성입니다." + +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" +msgstr "클라이언트앱" + +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" +msgstr "Bases: :py:class:`object`" + +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "예시" + +#: flwr.client.client_app.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as 
follows:" +msgstr "" +"일반적인 `Client` 구현의 이름이 `FlowerClient`라고 가정하면, 다음과 같이 `ClientApp`으로 래핑할 수 " +"있습니다:" + +#: flwr.client.client_app.ClientApp:16 of +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "위의 코드가 'client'라는 Python 모듈에 있는 경우 다음과 같이 시작할 수 있습니다:" + +#: flwr.client.client_app.ClientApp:21 of +msgid "" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." +msgstr "" +"이 `client:app` 예제에서 `client`는 이전 코드가 있는 Python 모듈 `client.py`를 가리키고 " +"`app`는 `ClientApp` 유형의 객체를 가리키는 전역 속성 `app`을 가리킵니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr ":py:obj:`evaluate `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "클라이언트 앱에 평가함수를 등록하는 데코레이터를 반환합니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr ":py:obj:`query `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "클라이언트 앱에 query fn을 등록하는 데코레이터를 반환합니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" +msgstr ":py:obj:`train `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." +msgstr "클라이언트 앱에 train fn을 등록하는 데코레이터를 반환합니다." + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" +msgstr "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 파라미터를 학습합니다." + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_properties `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_properties `\\ " +"\\(config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "클라이언트의 속성 집합을 반환합니다." 
+ +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`set_context `\\ " +"\\(context\\)" +msgstr "" +":py:obj:`set_context `\\ " +"\\(context\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "객체를 클라이언트 유형으로 변환하고 반환합니다." + +#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" + +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "현재(전역) 모델 매개변수입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "" +"서버가 클라이언트의 평가에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 평가에 사용되는 예제 수에 영향을 주기 위해 서버에서" +" 클라이언트로 임의의 값을 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." +msgstr "" +"* **loss** (*float*) - 로컬 데이터 세트에서 모델의 평가 손실입니다. * **num_examples** " +"(*int*) -- 평가에 사용된 예제 수입니다. * **metrics** (*Dict[str, Scalar]*) -- 임의의 " +"문자열 키를 부울, 바이트, float, int 또는 str 유형의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 " +"다시 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "**loss** (*float*) -- 로컬 데이터 세트에서 모델의 평가 손실입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "**num_examples** (*int*) - 평가에 사용된 예제 수입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." +msgstr "" +"**metrics** (*Dict[str, Scalar]*) - 임의의 문자열 키를 bool, bytes, float, int 또는" +" str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." +msgstr "" +"이전 반환 유형 형식(int, float, float)과 확장 형식(int, float, float, Dict[str, " +"Scalar])은 Flower 0.19부터 더 이상 사용되지 않으며 제거되었습니다." 
+ +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "" +"서버가 클라이언트의 훈련에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 (로컬) 트레이닝 에포크 수를 설정하는 등 서버에서 " +"클라이언트로 임의의 값을 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." +msgstr "" +"* **parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다. * **num_examples** " +"(*int*) -- 학습에 사용된 예제 수입니다. * **metrics** (*Dict[str, Scalar]*) - 임의의 문자열" +" 키를 bool, bytes, float, int,또는 str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 " +"다시 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "**parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다." + +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "**num_examples** (*int*) - 트레이닝에 사용된 예제 수입니다." + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "" +"서버에서 요청한 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 매개변수가 필요한지 클라이언트에게 알려주는 데 사용할 수 " +"있습니다." + +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "**parameters** -- 로컬 모델 파라미터를 NumPy 배열 목록으로 표시합니다." + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "" +"서버에서 요청하는 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 속성이 필요한지 클라이언트에게 알려주는 데 사용할 수 " +"있습니다." + +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." +msgstr "" +"**properties** -- 임의의 문자열 키를 bool, bytes, float, int 또는 str 타입의 값에 매핑하는 " +"dictionary입니다. 임의의 속성 값을 서버에 다시 전달하는 데 사용할 수 있습니다." + +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" +msgstr "mod" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +msgid "Client-side adaptive clipping modifier." +msgstr "클라이언트 측 적응형 클리핑 수정자." 
+ +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +msgid "Client-side fixed clipping modifier." +msgstr "클라이언트 측 고정 클리핑 수정자." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." +msgstr "메시지 크기 수정." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +msgid "Parameters size mod." +msgstr "매개변수 크기 mod." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" +msgstr "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." +msgstr "SecAgg 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +msgid "" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." +msgstr "SecAgg+ 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." + +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" +msgstr "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +msgid "Modifier for local differential privacy." +msgstr "로컬 차분 프라이버시를 위한 수정자." + +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" +msgstr "LocalDpMod" + +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +msgid "" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." +msgstr "이 모드는 클라이언트 모델 업데이트를 클립하고 서버로 보내기 전에 파라미터에 노이즈를 추가합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." +msgstr "이 함수는 `MessageType.TRAIN` 유형의 메시지에 대해 작동합니다." 
+ +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "클리핑 기준값입니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." +msgstr "클라이언트 모델의 민감도입니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." +msgstr "개인정보 보호 예산. 엡실론 값이 작을수록 개인정보 보호 수준이 높음을 나타냅니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." +msgstr "" +"실패 확률입니다. 프라이버시 메커니즘이 원하는 수준의 프라이버시를 제공하지 못할 확률입니다. 델타 값이 작을수록 프라이버시가 더 " +"엄격하게 보장된다는 의미입니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" +msgstr "로컬 DP 모드의 인스턴스를 생성하고 클라이언트 측 모드에 추가합니다:" + +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" +msgstr "adaptiveclipping\\_mod" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." +msgstr "이 모드는 서버 측 전략 래퍼인 차분 프라이버시 클라이언트 측 적응형 클리핑과 함께 사용해야 합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." +msgstr "래퍼는 클라이언트에 clipping_norm 값을 전송합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." +msgstr "이 모드는 클라이언트 모델 업데이트를 서버로 보내기 전에 클립합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +msgid "" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." +msgstr "또한 새 클리핑 값을 계산하기 위해 서버로 KEY_NORM_BIT을 전송합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "Notes" +msgstr "참고" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." +msgstr "여러 개를 사용할 때는 모드의 순서를 고려하세요." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +msgstr "일반적으로 adaptiveclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." + +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" +msgstr "fixedclipping\\_mod" + +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +msgstr "이 모드는 서버 측 전략 래퍼인 DifferentialPrivacyClientSideFixedClipping과 함께 사용해야 합니다." 
+ +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." +msgstr "일반적으로 fixedclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." + +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" +msgstr "make\\_ffn" + +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" +msgstr "message\\_size\\_mod" + +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." +msgstr "이 모드는 전송되는 메시지의 크기를 바이트 단위로 기록합니다." + +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +msgid "parameters\\_size\\_mod" +msgstr "parameters\\_size\\_mod" + +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." +msgstr "이 모드는 메시지에서 전송된 매개변수의 수와 그 크기를 바이트 단위로 기록합니다." + +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" +msgstr "secagg\\_mod" + +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +msgid "secaggplus\\_mod" +msgstr "secaggplus\\_mod" + +#: ../../source/ref-api/flwr.client.start_client.rst:2 +msgid "start\\_client" +msgstr "start\\_client" + +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" +"서버의 IPv4 또는 IPv6 주소입니다. Flower 서버가 포트 8080의 동일한 컴퓨터에서 실행되는 경우 `서버_주소`는 " +"`\"[::]:8080\"`이 됩니다." + +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "클라이언트를 인스턴스화하는 호출 가능 항목입니다. (기본값: None)" + +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "추상 베이스 클래스 `flwr.client.Client`의 구현(기본값: None)" + +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." +msgstr "" +"Flower 서버와 교환할 수 있는 gRPC 메시지의 최대 길이입니다. 기본값은 대부분의 모델에 충분합니다. 매우 큰 모델을 " +"훈련하는 사용자는 이 값을 늘려야 할 수도 있습니다. Flower 서버는 동일한 값으로 시작해야 " +"하며(`flwr.server.start_server` 참조), 그렇지 않으면 증가된 제한을 알지 못해 더 큰 메시지를 차단합니다." + +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "" +"바이트 문자열 또는 경로 문자열로 PEM 인코딩된 루트 인증서. 제공하면 인증서를 사용하여 SSL이 활성화된 Flower 서버에 " +"보안 연결이 설정됩니다." + +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." +msgstr "" +"True일 경우 안전하지 않은 gRPC 연결을 시작합니다. root_certificates`가 None인 경우 시스템 인증서를 " +"사용하여 False일 때 HTTPS 연결을 활성화합니다." 
+ +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" +msgstr "" +"전송 계층을 구성합니다. 허용되는 값입니다: - 'grpc-bidi': gRPC, 양방향 스트리밍 - 'grpc-rere': " +"gRPC, 요청-응답(실험적) - 'rest': HTTP(실험적)" + +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" +"연결 오류 발생 시 클라이언트가 서버 연결을 포기하기 전에 시도하는 최대 횟수입니다. None으로 설정하면 시도 횟수에 제한이 " +"없습니다." + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "" +"연결 오류 발생 시 클라이언트가 서버에 대한 연결을 시도하지 않는 최대 기간입니다. None으로 설정하면 총 시간에는 제한이 " +"없습니다." + +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "안전하지 않은 서버 연결로 gRPC 클라이언트 시작하기:" + +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "시스템 인증서를 사용하여 SSL 사용 gRPC 클라이언트를 시작합니다:" + +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "제공된 인증서를 사용하여 SSL 지원 gRPC 클라이언트를 시작합니다:" + +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" +msgstr "start\\_numpy\\_client" + +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." +msgstr "" +"이 함수는 1.7.0부터 더 이상 사용되지 않습니다. 대신 :code:`flwr.client.start_client`를 사용하고 " +"먼저 :code:`to_client()` 메서드를 실행하여 :code:`NumPyClient`를 " +":code:`flwr.client.Client` 유형으로 변환합니다." + +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "추상 베이스 클래스 `flwr.client.NumPyClient`의 구현입니다." + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "공통" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." +msgstr "NumPy에서 배열을 만듭니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "바이트에서 NumPy를 역직렬화합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" +msgstr "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." 
+msgstr "파일 및/또는 원격 로그 서버에 로깅을 구성합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" +msgstr "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +msgstr "차단을 피하기 위해 create_event를 ThreadPoolExecutor에 제출합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "정수 심각도 'level'과 함께 'msg % args'를 기록합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "NumPy와 배열을 바이트열로 직렬화합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "NumPy 배열을 매개변수 객체로 변환합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "표준 시간대를 UTC로 설정하여 time.time()에서 날짜 시간을 생성합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" +msgstr "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "매개변수 객체를 NumPy 배열로 변환합니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "배열 유형." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +msgstr "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "ClientMessage는 하나의 결과 메시지를 저장하는 데 사용되는 컨테이너입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr ":py:obj:`Code `\\ \\(value\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "클라이언트 상태 코드." 
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Config `\\"
+msgstr ":py:obj:`Config `\\"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | "
+":py:class:`bytes` | :py:class:`float` | :py:class:`int` | "
+":py:class:`str`]"
+msgstr ""
+"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | "
+":py:class:`bytes` | :py:class:`float` | :py:class:`int` | "
+":py:class:`str`]"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`ConfigsRecord `\\ "
+"\\(\\[configs\\_dict\\, keep\\_input\\]\\)"
+msgstr ""
+":py:obj:`ConfigsRecord `\\ "
+"\\(\\[configs\\_dict\\, keep\\_input\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.record.configsrecord.ConfigsRecord:1 of
+msgid "Configs record."
+msgstr "구성 기록."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`Context `\\ \\(node\\_id\\, "
+"node\\_config\\, state\\, run\\_config\\)"
+msgstr ""
+":py:obj:`Context `\\ \\(node\\_id\\, "
+"node\\_config\\, state\\, run\\_config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.context.Context:1 of
+msgid "Context of your run."
+msgstr "실행의 컨텍스트입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)"
+msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.DisconnectRes:1 of
+msgid "DisconnectRes message from client to server."
+msgstr "클라이언트가 서버로 보내는 DisconnectRes 메시지입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)"
+msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.message.Error:1 of
+msgid "A dataclass that stores information about an error that occurred."
+msgstr "발생한 오류에 대한 정보를 저장하는 데이터 클래스입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`EvaluateIns `\\ \\(parameters\\, "
+"config\\)"
+msgstr ""
+":py:obj:`EvaluateIns `\\ \\(parameters\\, "
+"config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.EvaluateIns:1 of
+msgid "Evaluate instructions for a client."
+msgstr "클라이언트를 위한 평가 지침입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, "
+"num\\_examples\\, metrics\\)"
+msgstr ""
+":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, "
+"num\\_examples\\, metrics\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.EvaluateRes:1 of
+msgid "Evaluate response from a client."
+msgstr "클라이언트가 보낸 평가 응답입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`EventType `\\ \\(value\\)"
+msgstr ":py:obj:`EventType `\\ \\(value\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.telemetry.EventType:1 of
+msgid "Types of telemetry events."
+msgstr "원격 분석 이벤트의 유형."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)"
+msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.FitIns:1 of
+msgid "Fit instructions for a client."
+msgstr "클라이언트를 위한 학습(fit) 지침입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`FitRes `\\ \\(status\\, parameters\\, "
+"num\\_examples\\, metrics\\)"
+msgstr ""
+":py:obj:`FitRes `\\ \\(status\\, parameters\\, "
+"num\\_examples\\, metrics\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.FitRes:1 of
+msgid "Fit response from a client."
+msgstr "클라이언트가 보낸 학습(fit) 응답입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`GetParametersIns `\\ \\(config\\)"
+msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.GetParametersIns:1 of
+msgid "Parameters request for a client."
+msgstr "클라이언트에 대한 매개변수 요청입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`GetParametersRes `\\ \\(status\\, "
+"parameters\\)"
+msgstr ""
+":py:obj:`GetParametersRes `\\ \\(status\\, "
+"parameters\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.GetParametersRes:1 of
+msgid "Response when asked to return parameters."
+msgstr "매개변수 반환 요청 시 응답합니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)"
+msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.GetPropertiesIns:1 of
+msgid "Properties request for a client."
+msgstr "클라이언트에 대한 속성 요청."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`GetPropertiesRes `\\ \\(status\\, "
+"properties\\)"
+msgstr ""
+":py:obj:`GetPropertiesRes `\\ \\(status\\, "
+"properties\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.GetPropertiesRes:1 of
+msgid "Properties response from a client."
+msgstr "클라이언트가 보낸 속성 응답입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`Message `\\ \\(metadata\\[\\, content\\, "
+"error\\]\\)"
+msgstr ""
+":py:obj:`Message `\\ \\(metadata\\[\\, content\\, "
+"error\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.message.Message:1 of
+msgid "State of your application from the viewpoint of the entity using it."
+msgstr "애플리케이션을 사용하는 엔티티의 관점에서 애플리케이션의 상태입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`MessageType `\\ \\(\\)"
+msgstr ":py:obj:`MessageType `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.constant.MessageType:1 of
+msgid "Message type."
+msgstr "메시지 타입."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)"
+msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.constant.MessageTypeLegacy:1 of
+msgid "Legacy message type."
+msgstr "레거시 메시지 타입."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`Metadata `\\ \\(run\\_id\\, "
+"message\\_id\\, src\\_node\\_id\\, ...\\)"
+msgstr ""
+":py:obj:`Metadata `\\ \\(run\\_id\\, "
+"message\\_id\\, src\\_node\\_id\\, ...\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.message.Metadata:1 of
+msgid "A dataclass holding metadata associated with the current message."
+msgstr "현재 메시지와 관련된 메타데이터를 보유한 데이터 클래스입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Metrics `\\"
+msgstr ":py:obj:`Metrics `\\"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`MetricsRecord `\\ "
+"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)"
+msgstr ""
+":py:obj:`MetricsRecord `\\ "
+"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.record.metricsrecord.MetricsRecord:1 of
+#, fuzzy
+msgid "Metrics recod."
+msgstr "메트릭 기록."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`NDArray `\\"
+msgstr ":py:obj:`NDArray `\\"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, "
+":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]"
+msgstr ""
+"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, "
+":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`NDArrays `\\"
+msgstr ":py:obj:`NDArrays `\\"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ "
+"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ "
+"[:py:obj:`~typing.Any`]]]"
+msgstr ""
+"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ "
+"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ "
+"[:py:obj:`~typing.Any`]]]"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`Parameters `\\ \\(tensors\\, "
+"tensor\\_type\\)"
+msgstr ""
+":py:obj:`Parameters `\\ \\(tensors\\, "
+"tensor\\_type\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.Parameters:1 of
+msgid "Model parameters."
+msgstr "모델 매개변수."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`ParametersRecord `\\ "
+"\\(\\[array\\_dict\\, keep\\_input\\]\\)"
+msgstr ""
+":py:obj:`ParametersRecord `\\ "
+"\\(\\[array\\_dict\\, keep\\_input\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.record.parametersrecord.ParametersRecord:1 of
+msgid "Parameters record."
+msgstr "매개변수 기록."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Properties `\\"
+msgstr ":py:obj:`Properties `\\"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)"
+msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.ReconnectIns:1 of
+msgid "ReconnectIns message from server to client."
+msgstr "서버가 클라이언트로 보내는 ReconnectIns 메시지입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`RecordSet `\\ "
+"\\(\\[parameters\\_records\\, ...\\]\\)"
+msgstr ""
+":py:obj:`RecordSet `\\ "
+"\\(\\[parameters\\_records\\, ...\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.record.recordset.RecordSet:1 of
+msgid "RecordSet stores groups of parameters, metrics and configs."
+msgstr "RecordSet은 매개변수, 메트릭 및 설정 그룹을 저장합니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`ServerMessage `\\ "
+"\\(\\[get\\_properties\\_ins\\, ...\\]\\)"
+msgstr ""
+":py:obj:`ServerMessage `\\ "
+"\\(\\[get\\_properties\\_ins\\, ...\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.ServerMessage:1 of
+msgid "ServerMessage is a container used to hold one instruction message."
+msgstr "ServerMessage는 하나의 instruction 메시지를 저장하는 데 사용되는 컨테이너입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Status `\\ \\(code\\, message\\)"
+msgstr ":py:obj:`Status `\\ \\(code\\, message\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.Status:1 of
+msgid "Client status."
+msgstr "클라이언트 상태."
+
+#: ../../source/ref-api/flwr.common.Array.rst:2
+msgid "Array"
+msgstr "배열"
+
+#: flwr.common.record.parametersrecord.Array:3 of
+msgid ""
+"A dataclass containing serialized data from an array-like or tensor-like "
+"object along with some metadata about it."
+msgstr "배열형 또는 텐서형 객체의 직렬화된 데이터와 그에 대한 일부 메타데이터를 포함하는 데이터 클래스입니다."
+
+#: flwr.common.record.parametersrecord.Array:6 of
+msgid ""
+"A string representing the data type of the serialised object (e.g. "
+"`np.float32`)"
+msgstr "직렬화된 객체의 데이터 유형을 나타내는 문자열(예: `np.float32`)"
+
+#: flwr.common.record.parametersrecord.Array:8 of
+msgid ""
+"A list representing the shape of the unserialized array-like object. This"
+" is used to deserialize the data (depending on the serialization method) "
+"or simply as a metadata field."
+msgstr ""
+"직렬화되지 않은 배열과 같은 객체의 모양을 나타내는 목록입니다. 직렬화 방법에 따라 데이터를 역직렬화하는 데 사용되거나 단순히 "
+"메타데이터 필드로 사용됩니다."
+
+#: flwr.common.record.parametersrecord.Array:12 of
+msgid ""
+"A string indicating the type of serialisation mechanism used to generate "
+"the bytes in `data` from an array-like or tensor-like object."
+msgstr "배열형 또는 텐서형 객체에서 `data`의 바이트를 생성하는 데 사용되는 직렬화 메커니즘의 유형을 나타내는 문자열입니다."
+
+#: flwr.common.record.parametersrecord.Array:15 of
+msgid "A buffer of bytes containing the data."
+msgstr "데이터를 포함하는 바이트 버퍼입니다."
+
+#: ../../source/ref-api/flwr.common.Array.rst:26::1
+msgid ":py:obj:`numpy `\\ \\(\\)"
+msgstr ":py:obj:`numpy `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.Array.rst:26::1
+#: flwr.common.record.parametersrecord.Array.numpy:1 of
+msgid "Return the array as a NumPy array."
+msgstr "배열을 NumPy 배열로 반환합니다."
+ +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr ":py:obj:`dtype `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" +msgstr ":py:obj:`shape `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" +msgstr ":py:obj:`stype `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr ":py:obj:`data `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" +msgstr "클라이언트 메시지" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" +msgstr ":py:obj:`evaluate_res `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" +msgstr ":py:obj:`fit_res `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" +msgstr "" +":py:obj:`get_parameters_res " +"`\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" +msgstr "" +":py:obj:`get_properties_res " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" +msgstr "코드" + +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`~enum.Enum`" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" +msgstr ":py:obj:`OK `\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Config.rst:2 +#, fuzzy +msgid "Config" +msgstr "구성" + +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" +msgstr "컨피그 레코드" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " +"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " +":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" +" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" + +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. 
A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." +msgstr "이 객체에 저장된 바이트 수를 반환합니다." + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "키를 찾을 수 없으면 주어진 경우 d가 반환되고, 그렇지 않으면 KeyError가 발생합니다." + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." 
+msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "이 함수는 booleans을 1바이트를 차지하는 것으로 계산합니다." + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "컨텍스트" + +#: flwr.common.context.Context:3 of +#, fuzzy +msgid "The ID that identifies the node." +msgstr "오류 식별자입니다." + +#: flwr.common.context.Context:5 of +msgid "" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." +msgstr "" + +#: flwr.common.context.Context:8 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "" +"특정 실행에서 엔티티가 추가한 레코드를 보유하며 로컬에 유지됩니다. 즉, 저장된 데이터는 실행 중인 시스템을 벗어나지 않습니다. " +"모드를 실행할 때 중간 저장소나 스크래치 패드로 사용할 수 있습니다. 또한 이 엔티티의 수명 주기 동안 다른 시점에서 액세스하기 " +"위한 메모리로도 사용할 수 있습니다(예: 여러 라운드에 걸쳐)" + +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`state `\\" +msgstr ":py:obj:`state `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" +msgstr "연결 해제" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" + +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "오류" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "오류 식별자입니다." + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "오류가 발생한 이유(예: 예외 스택 추적)" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "오류 코드." 
+ +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "오류에 대해 보고된 사유입니다." + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" +msgstr "평가" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" +msgstr "EvaluateRes" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" +msgstr ":py:obj:`loss `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" +msgstr "이벤트 타입" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" +msgstr "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." +msgstr "인코딩용으로 등록된 코덱을 사용하여 문자열을 인코딩합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" +msgstr "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." +msgstr "이전 하위 문자열이 모두 새 하위 문자열로 바뀐 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." +msgstr "sep를 구분 문자열로 사용하여 문자열의 하위 문자열 목록을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr ":py:obj:`join `\\ \\(iterable\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." +msgstr "원하는 수의 문자열을 연결합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr ":py:obj:`capitalize `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." +msgstr "대문자로 된 문자열을 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr ":py:obj:`casefold `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." +msgstr "대소문자 구분 없는 비교에 적합한 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`title `\\ \\(\\)" +msgstr ":py:obj:`title `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." +msgstr "각 단어의 제목이 대소문자로 구분된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." +msgstr "길이 너비의 가운데 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." +msgstr "문자열 S[start:end]에서 하위 문자열 sub이 겹치지 않는 횟수를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" +msgstr "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." +msgstr "모든 탭 문자가 공백을 사용하여 확장된 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." +msgstr "하위 문자열 sub이 발견되는 S에서 하위가 S[start:end] 내에 포함되는 가장 낮은 인덱스를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition `\\ \\(sep\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." +msgstr "지정된 구분 기호를 사용하여 문자열을 세 부분으로 분할합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." +msgstr "왼쪽으로 정렬된 길이의 문자열을 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr ":py:obj:`lower `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." +msgstr "소문자로 변환된 문자열 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." +msgstr "선행 공백이 제거된 문자열의 복사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." +msgstr "부분 문자열 sub이 발견되는 곳에서 sub이 S[start:end] 내에 포함되도록 S에서 가장 높은 인덱스를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." +msgstr "길이 너비의 오른쪽 정렬된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." +msgstr "후행 공백이 제거된 문자열의 복사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." +msgstr "문자열의 줄 목록을 줄 경계에서 구분하여 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`strip `\\ \\(\\[chars\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." +msgstr "선행 및 후행 공백이 제거된 문자열 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`swapcase `\\ \\(\\)" +msgstr ":py:obj:`swapcase `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of +msgid "" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." +msgstr "대문자를 소문자로, 소문자를 대문자로 변환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgstr ":py:obj:`translate `\\ \\(table\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." +msgstr "주어진 번역 테이블을 사용하여 문자열의 각 문자를 바꿉니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`upper `\\ \\(\\)" +msgstr ":py:obj:`upper `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." +msgstr "Return a copy of the string converted to uppercase." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" +msgstr "" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." +msgstr "S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." +msgstr "S가 지정된 접미사로 끝나면 True를 반환하고 그렇지 않으면 False을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" +msgstr "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." +msgstr "주어진 접두사 문자열이 있는 경우 제거된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" +msgstr "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." +msgstr "주어진 접미사 문자열이 있는 경우 제거된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr ":py:obj:`isascii `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." +msgstr "문자열의 모든 문자가 ASCII인 경우 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr ":py:obj:`islower `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." +msgstr "문자열이 소문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr ":py:obj:`isupper `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "문자열이 대문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr ":py:obj:`istitle `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." +msgstr "문자열이 제목 대/소문자가 구분된 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr ":py:obj:`isspace `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." +msgstr "문자열이 공백 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr ":py:obj:`isdecimal `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." +msgstr "문자열이 10진수 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr ":py:obj:`isdigit `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." +msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr ":py:obj:`isnumeric `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." +msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr ":py:obj:`isalpha `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." +msgstr "문자열이 알파벳 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr ":py:obj:`isalnum `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." +msgstr "문자열이 영-숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr ":py:obj:`isidentifier `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." +msgstr "문자열이 유효한 파이썬 식별자인 경우 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr ":py:obj:`isprintable `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." +msgstr "문자열을 인쇄할 수 있으면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr ":py:obj:`zfill `\\ \\(width\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of +msgid "" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." +msgstr "숫자 문자열을 왼쪽에 0으로 채워서 지정된 너비의 필드를 채웁니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." +msgstr "args와 kwarg의 치환을 사용하여 형식이 지정된 S를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr ":py:obj:`format_map `\\ \\(mapping\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." +msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`maketrans `\\" +msgstr ":py:obj:`maketrans `\\" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." +msgstr "str.translate()에 사용할 수 있는 번역 테이블을 반환합니다." + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" +msgstr ":py:obj:`PING `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr ":py:obj:`START_CLIENT_ENTER `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr ":py:obj:`START_CLIENT_LEAVE `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr ":py:obj:`START_SERVER_ENTER `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr ":py:obj:`START_SERVER_LEAVE `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" + +#: 
flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." +msgstr "보다 구체적으로, 첫 번째 문자는 대문자로, 나머지는 소문자로 만듭니다." + +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." +msgstr "패딩은 지정된 채우기 문자를 사용하여 수행됩니다(기본값은 공백)." + +#: flwr.common.EventType.count:1 of +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." +msgstr "" +"문자열 S[start:end]에서 부분 문자열 sub의 겹치지 않는 횟수를 반환합니다. 선택적 인자 start와 end는 슬라이스" +" 표기법과 같이 해석됩니다." + +#: flwr.common.EventType.encode:3 of +msgid "encoding" +msgstr "인코딩" + +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." +msgstr "문자열을 인코딩합니다." + +#: flwr.common.EventType.encode:9 of +msgid "errors" +msgstr "오류" + +#: flwr.common.EventType.encode:6 of +msgid "" +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." +msgstr "" +"인코딩 오류에 사용할 오류 처리 방식입니다. 기본값은 'strict'로, 인코딩 오류가 발생하면 UnicodeEncodeError를" +" 발생시킵니다. 다른 가능한 값으로는 'ignore', 'replace', 'xmlcharrefreplace', 그리고 " +"UnicodeEncodeError를 처리할 수 있는 codecs.register_error에 등록된 다른 이름도 사용할 수 " +"있습니다." + +#: flwr.common.EventType.endswith:1 of +msgid "" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." +msgstr "" +"S가 지정된 접미사로 끝나면 True를 반환하고, 그렇지 않으면 False를 반환합니다. 시작 옵션을 사용하면 해당 위치부터 S를 " +"테스트합니다. end 옵션을 사용하면 해당 위치에서 S 비교를 중지합니다. 접미사는 시도할 문자열의 튜플일 수도 있습니다." + +#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." +msgstr "탭 크기를 지정하지 않으면 크기가 8로 지정됩니다." + +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." 
+msgstr "" +"부분 문자열 sub가 발견되는 곳의 가장 낮은 인덱스를 반환하며, sub는 S[start:end] 내에 포함되어야 합니다. 선택적" +" 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다." + +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." +msgstr "실패 시 -1을 반환합니다." + +#: flwr.common.EventType.format:1 of +msgid "" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." +msgstr "args와 kwargs의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 '}')로 식별됩니다." + +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." +msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 '}')로 식별됩니다." + +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." +msgstr "부분 문자열을 찾을 수 없을 때 ValueError를 발생시킵니다." + +#: flwr.common.EventType.isalnum:3 of +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." +msgstr "문자열의 모든 문자가 영숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 영-숫자입니다." + +#: flwr.common.EventType.isalpha:3 of +msgid "" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 알파벳이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 알파벳입니다." + +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "ASCII 문자는 U+0000-U+007F 범위의 코드 포인트가 있습니다. 빈 문자열도 ASCII입니다." + +#: flwr.common.EventType.isdecimal:3 of +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." +msgstr "문자열의 모든 문자가 10진수이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 10진수 문자열입니다." + +#: flwr.common.EventType.isdigit:3 of +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 숫자 문자열입니다." + +#: flwr.common.EventType.isidentifier:3 of +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." +msgstr "" +"keyword.iskeyword(s)를 호출하여 문자열 s가 \"def\" 또는 \"class\"와 같은 예약 식별자인지 " +"테스트합니다." + +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." +msgstr "문자열이 모두 소문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 소문자입니다." + +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." +msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 숫자입니다." + +#: flwr.common.EventType.isprintable:3 of +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." +msgstr "문자열은 repr()에서 모든 문자가 인쇄 가능한 것으로 간주되거나 비어 있는 경우 인쇄할 수 있습니다." + +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 공백이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 공백입니다." 
+
+#: flwr.common.EventType.istitle:3 of
+msgid ""
+"In a title-cased string, upper- and title-case characters may only follow"
+" uncased characters and lowercase characters only cased ones."
+msgstr "제목 대/소문자 문자열에서 대문자와 제목 대문자는 대소문자가 없는 문자 뒤에만 올 수 있고, 소문자는 대소문자가 있는 문자 뒤에만 올 수 있습니다."
+
+#: flwr.common.EventType.isupper:3 of
+msgid ""
+"A string is uppercase if all cased characters in the string are uppercase"
+" and there is at least one cased character in the string."
+msgstr "문자열의 모든 문자가 대문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 대문자입니다."
+
+#: flwr.common.EventType.join:3 of
+msgid ""
+"The string whose method is called is inserted in between each given "
+"string. The result is returned as a new string."
+msgstr "메서드가 호출되는 문자열은 주어진 각 문자열 사이에 삽입됩니다. 결과는 새 문자열로 반환됩니다."
+
+#: flwr.common.EventType.join:6 of
+msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'"
+msgstr "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'"
+
+#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3
+#: flwr.common.EventType.strip:3 of
+msgid "If chars is given and not None, remove characters in chars instead."
+msgstr "chars가 주어지고 None이 아니면 대신 chars에 포함된 문자를 제거합니다."
+
+#: flwr.common.EventType.maketrans:3 of
+msgid ""
+"If there is only one argument, it must be a dictionary mapping Unicode "
+"ordinals (integers) or characters to Unicode ordinals, strings or None. "
+"Character keys will be then converted to ordinals. If there are two "
+"arguments, they must be strings of equal length, and in the resulting "
+"dictionary, each character in x will be mapped to the character at the "
+"same position in y. If there is a third argument, it must be a string, "
+"whose characters will be mapped to None in the result."
+msgstr ""
+"argument이 하나만 있는 경우, 유니코드 서수(정수) 또는 문자를 유니코드 서수, 문자열 또는 None에 매핑하는 "
+"dictionary이어야 합니다. 그러면 문자 키가 서수로 변환됩니다. 인수가 두 개이면 길이가 같은 문자열이어야 하며, 결과 "
+"dictionary에서 x의 각 문자는 y의 같은 위치에 있는 문자에 매핑됩니다. 세 번째 인수가 있으면 문자열이어야 하며, 그 "
+"문자는 결과에서 None에 매핑됩니다."
+
+#: flwr.common.EventType.partition:3 of
+msgid ""
+"This will search for the separator in the string. If the separator is "
+"found, returns a 3-tuple containing the part before the separator, the "
+"separator itself, and the part after it."
+msgstr ""
+"문자열에서 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 기호 뒤 부분을 포함하는 "
+"3-tuple을 반환합니다."
+
+#: flwr.common.EventType.partition:7 of
+msgid ""
+"If the separator is not found, returns a 3-tuple containing the original "
+"string and two empty strings."
+msgstr "구분 기호를 찾을 수 없으면 원래 문자열과 빈 문자열 2개를 포함하는 3-튜플을 반환합니다."
+
+#: flwr.common.EventType.removeprefix:3 of
+msgid ""
+"If the string starts with the prefix string, return string[len(prefix):]."
+" Otherwise, return a copy of the original string."
+msgstr "문자열이 접두사 문자열로 시작하면 문자열[len(prefix):]을 반환합니다. 그렇지 않으면 원본 문자열의 복사본을 반환합니다."
+
+#: flwr.common.EventType.removesuffix:3 of
+msgid ""
+"If the string ends with the suffix string and that suffix is not empty, "
+"return string[:-len(suffix)]. Otherwise, return a copy of the original "
+"string."
+msgstr ""
+"문자열이 접미사 문자열로 끝나고 해당 접미사가 비어 있지 않으면 문자열[:-len(suffix)]을 반환합니다. 그렇지 않으면 원본"
+" 문자열의 복사본을 반환합니다."
+
+#: flwr.common.EventType.replace:5 of
+msgid "count"
+msgstr "카운트"
+
+#: flwr.common.EventType.replace:4 of
+msgid ""
+"Maximum number of occurrences to replace. -1 (the default value) means "
+"replace all occurrences."
+msgstr "대체할 최대 발생 횟수입니다. -1(기본값)은 모든 항목을 교체한다는 의미입니다."
+
+#: flwr.common.EventType.replace:7 of
+msgid ""
+"If the optional argument count is given, only the first count occurrences"
+" are replaced."
+msgstr "선택적 argument 개수를 지정하면 첫 번째 개수만 바뀝니다." + +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "" +"부분 문자열 sub가 발견되는 곳의 가장 높은 인덱스를 반환하며, sub는 S[start:end] 내에 포함되어야 합니다. 선택적" +" 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다." + +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." +msgstr "" +"그러면 문자열에서 끝 부분부터 시작하여 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 " +"기호 뒤 부분을 포함하는 3-tuple을 반환합니다." + +#: flwr.common.EventType.rpartition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." +msgstr "구분 기호를 찾을 수 없는 경우 빈 문자열 2개와 원래 문자열을 포함하는 3-tuple을 반환합니다." + +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" +msgstr "sep" + +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." +msgstr "문자열을 분할하는 데 사용되는 구분 기호입니다." + +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." +msgstr "" +"None(기본값)으로 설정하면 모든 공백 문자(\\\\n \\\\r \\\\t \\\\f 및 공백 포함)를 분할하고 결과에서 빈 " +"문자열을 삭제합니다." + +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" +msgstr "maxsplit" + +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." +msgstr "최대 분할 횟수(왼쪽부터 시작). -1(기본값)은 제한이 없음을 의미합니다." + +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." +msgstr "분할은 문자열 끝에서 시작하여 앞쪽으로 진행됩니다." + +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "" +"참고로 str.split()은 주로 의도적으로 구분된 데이터에 유용합니다. 구두점이 포함된 자연 텍스트의 경우 정규식 모듈을 " +"사용하는 것이 좋습니다." + +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." +msgstr "줄 바꿈은 keepends가 주어지고 참이 아니면 결과 목록에 포함되지 않습니다." + +#: flwr.common.EventType.startswith:1 of +msgid "" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." +msgstr "" +"S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 반환합니다. 시작 옵션을 사용하면 해당 위치에서 " +"시작되는 S를 테스트합니다. 선택적 end를 사용하면 해당 위치에서 S 비교를 중지합니다. 접두사는 시도할 문자열의 튜플일 수도 " +"있습니다." + +#: flwr.common.EventType.title:3 of +msgid "" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." +msgstr "보다 구체적으로, 단어는 대문자로 시작하고 나머지 모든 대소문자는 소문자로 표기합니다." 
+ +#: flwr.common.EventType.translate:5 of +msgid "table" +msgstr "table" + +#: flwr.common.EventType.translate:4 of +msgid "" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." +msgstr "유니코드 서수를 유니코드 서수, 문자열 또는 없음으로 매핑하는 번역 테이블이어야 합니다." + +#: flwr.common.EventType.translate:7 of +msgid "" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." +msgstr "" +"테이블은 사전이나 목록과 같이 __getitem__을 통해 조회/색인을 구현해야 합니다. 이 작업에서 LookupError가 " +"발생하면 문자는 그대로 유지됩니다. 없음으로 매핑된 문자는 삭제됩니다." + +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." +msgstr "문자열은 잘리지 않습니다." + +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" +msgstr "FitIns" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" +msgstr "FitRes" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" +msgstr "GetParametersIns" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" +msgstr "GetParametersRes" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" +msgstr "GetPropertiesIns" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" +msgstr "GetPropertiesRes" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.client.client.Client.evaluate:8 of -msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." -msgstr "" -"로컬 데이터 세트의 손실 및 평가에 사용된 로컬 데이터 예제 수와 같은 기타 세" -"부 정보가 포함된 평가 결과입니다." 
+#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" +msgstr ":py:obj:`properties `\\" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" -msgstr "반환 타입" +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" +msgstr "Message" -#: flwr.client.client.Client.fit:3 of +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "실행할 메시지에 대한 정보를 포함한 데이터 클래스입니다." + +#: flwr.common.message.Message:5 of msgid "" -"The training instructions containing (global) model parameters received from " -"the server and a dictionary of configuration values used to customize the " -"local training process." -msgstr "" -"서버에서 받은 (전역) 모델 파라미터와 로컬 학습 프로세스를 사용자 지정하는 데 " -"사용되는 구성 값 사전이 포함된 학습 지침입니다." +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "다른 엔터티(예: 서버 측 로직이 클라이언트로 전송하거나 그 반대로 전송하는 등)가 전송했거나 전송할 레코드를 보유합니다." -#: flwr.client.client.Client.fit:8 of +#: flwr.common.message.Message:8 of msgid "" -"The training result containing updated parameters and other details such as " -"the number of local training examples used for training." -msgstr "" -"업데이트된 매개변수와 훈련에 사용된 로컬 훈련 예제 수와 같은 기타 세부 정보" -"가 포함된 훈련 결과입니다." +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "다른 메시지를 처리할 때 발생한 오류에 대한 정보를 캡처하는 데이터 클래스입니다." -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -"구성 값 dictionary이 포함된 서버에서 받은 매개변수 가져오기 명령어입니다." +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." -msgstr "현재 로컬 모델 파라미터입니다." 
+#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "오류가 발생했음을 나타내는 답장 메시지를 작성합니다." -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." -msgstr "구성 값 dictionary이 포함된 서버로부터 받은 속성 가져오기 명령입니다." +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" +msgstr "" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." -msgstr "현재 클라이언트 속성입니다." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "지정된 콘텐츠와 TTL을 사용하여 이 메시지에 대한 답글을 작성합니다." -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" -msgstr "클라이언트앱" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr ":py:obj:`has_content `\\ \\(\\)" -#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" -msgstr "Bases: :py:class:`object`" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." +msgstr "메시지에 콘텐츠가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다." 
-#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr ":py:obj:`has_error `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "메시지에 오류가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다." + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" +msgstr ":py:obj:`content `\\" + +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 #: of -msgid "Examples" -msgstr "예시" +msgid "The content of this message." +msgstr "이 메시지의 내용입니다." -#: flwr.client.client_app.ClientApp:5 of +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" +msgstr ":py:obj:`error `\\" + +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." +msgstr "이 메시지가 캡처한 오류입니다." + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" +msgstr ":py:obj:`metadata `\\" + +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." +msgstr "오류가 발생했습니다." + +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -"일반적인 `Client` 구현의 이름이 `FlowerClient`라고 가정하면, 다음과 같이 " -"`ClientApp`으로 래핑할 수 있습니다:" +"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 전까지 남은 시간을 기준으로 설정됩니다. 이는 다음과 " +"같은 공식을 따릅니다: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" -#: flwr.client.client_app.ClientApp:16 of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -"If the above code is in a Python module called `client`, it can be started " -"as follows:" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" msgstr "" -"위의 코드가 'client'라는 Python 모듈에 있는 경우 다음과 같이 시작할 수 있습니" -"다:" +"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 전까지 남은 시간을 기준으로 설정됩니다. 
이는 다음 " +"공식을 따릅니다:" -#: flwr.client.client_app.ClientApp:21 of +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" + +#: flwr.common.message.Message.create_reply:3 of msgid "" -"In this `client:app` example, `client` refers to the Python module `client." -"py` in which the previous code lives in and `app` refers to the global " -"attribute `app` that points to an object of type `ClientApp`." +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -"이 `client:app` 예제에서 `client`는 이전 코드가 있는 Python 모듈 `client.py`" -"를 가리키고 `app`는 `ClientApp` 유형의 객체를 가리키는 전역 속성 `app`을 가리" -"킵니다." +"이 메서드는 이 메시지에 대한 응답으로 새로운 '메시지'를 생성합니다. 이 메시지에서 'run_id', 'src_node_id', " +"'dst_node_id', 'message_type'을 상속하고 'reply_to_message'를 이 메시지의 ID로 설정합니다." -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr ":py:obj:`evaluate `\\ \\(\\)" +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "답장 메시지의 콘텐츠입니다." -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." -msgstr "클라이언트 앱에 평가함수를 등록하는 데코레이터를 반환합니다." +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." +msgstr "답장을 나타내는 새로운 `메시지` 인스턴스입니다." -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" -msgstr ":py:obj:`query `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "MessageType" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." -msgstr "클라이언트 앱에 query fn을 등록하는 데코레이터를 반환합니다." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr ":py:obj:`EVALUATE `\\" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`train `\\ \\(\\)" -msgstr ":py:obj:`train `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr ":py:obj:`QUERY `\\" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." -msgstr "클라이언트 앱에 train fn을 등록하는 데코레이터를 반환합니다." 
+#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr ":py:obj:`TRAIN `\\" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "MessageTypeLegacy" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" -msgstr "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr ":py:obj:`GET_PARAMETERS `\\" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr ":py:obj:`GET_PROPERTIES `\\" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "현재 실행에 대한 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." +msgstr "현재 메시지의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "이 메시지를 보내는 노드의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "이 메시지를 수신하는 노드의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "이 메시지가 회신하는 메시지의 식별자입니다." + +#: flwr.common.message.Metadata:13 of msgid "" -":py:obj:`fit `\\ \\(parameters\\, config\\)" -msgstr "" -":py:obj:`fit `\\ \\(parameters\\, config\\)" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "메시지를 그룹화하기 위한 식별자입니다. 일부 설정에서는 FL 라운드로 사용됩니다." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." -msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 파라미터를 학습합니다." +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." +msgstr "이 메시지의 유효 시간(초)입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "수신 측에서 실행할 작업을 인코딩하는 문자열입니다." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`created_at `\\" +msgstr ":py:obj:`created_at `\\" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" -msgstr "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." +msgstr "메시지가 생성된 때의 Unix timestamp입니다." 
-#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" -msgstr "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" +msgstr ":py:obj:`dst_node_id `\\" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "클라이언트의 속성 집합을 반환합니다." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" +msgstr ":py:obj:`group_id `\\" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid "" -":py:obj:`set_context `\\ \\(context\\)" -msgstr "" -":py:obj:`set_context `\\ \\(context\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." +msgstr "메시지를 그룹화하기 위한 식별자입니다." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" +msgstr ":py:obj:`message_id `\\" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." -msgstr "객체를 클라이언트 유형으로 변환하고 반환합니다." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" +msgstr ":py:obj:`message_type `\\" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" +msgstr ":py:obj:`reply_to_message `\\" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." -msgstr "현재(전역) 모델 매개변수입니다." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`run_id `\\" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of -msgid "" -"Configuration parameters which allow the server to influence evaluation on " -"the client. It can be used to communicate arbitrary values from the server " -"to the client, for example, to influence the number of examples used for " -"evaluation." -msgstr "" -"서버가 클라이언트의 평가에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 " -"평가에 사용되는 예제 수에 영향을 주기 위해 서버에서 클라이언트로 임의의 값을 " -"전달하는 데 사용할 수 있습니다." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of -msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or str. " -"It can be used to communicate arbitrary values back to the server." -msgstr "" -"* **loss** (*float*) - 로컬 데이터 세트에서 모델의 평가 손실입니다. * " -"**num_examples** (*int*) -- 평가에 사용된 예제 수입니다. * **metrics** " -"(*Dict[str, Scalar]*) -- 임의의 문자열 키를 부울, 바이트, float, int 또는 " -"str 유형의 값에 매핑하는 dictionary입니다. 
임의의 값을 서버에 다시 전달하는 " -"데 사용할 수 있습니다." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" +msgstr ":py:obj:`ttl `\\" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of -msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local dataset." -msgstr "**loss** (*float*) -- 로컬 데이터 세트에서 모델의 평가 손실입니다." +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +msgid "Time-to-live for this message." +msgstr "이 메시지를 기다리는 시간입니다." -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." -msgstr "**num_examples** (*int*) - 평가에 사용된 예제 수입니다." +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +#, fuzzy +msgid "Metrics" +msgstr "MetricsRecord" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of -msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary string " -"keys to values of type bool, bytes, float, int, or str. It can be used to " -"communicate arbitrary values back to the server." -msgstr "" -"**metrics** (*Dict[str, Scalar]*) - 임의의 문자열 키를 bool, bytes, float, " -"int 또는 str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 " -"전달하는 데 사용할 수 있습니다." +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "MetricsRecord" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy msgid "" -"The previous return type format (int, float, float) and the extended format " -"(int, float, float, Dict[str, Scalar]) have been deprecated and removed " -"since Flower 0.19." +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -"이전 반환 유형 형식(int, float, float)과 확장 형식(int, float, float, " -"Dict[str, Scalar])은 Flower 0.19부터 더 이상 사용되지 않으며 제거되었습니다." +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" +" [:py:class:`float`]]" -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: flwr.common.record.metricsrecord.MetricsRecord:3 of msgid "" -"Configuration parameters which allow the server to influence training on the " -"client. It can be used to communicate arbitrary values from the server to " -"the client, for example, to set the number of (local) training epochs." +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -"서버가 클라이언트의 훈련에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 " -"(로컬) 트레이닝 에포크 수를 설정하는 등 서버에서 클라이언트로 임의의 값을 전" -"달하는 데 사용할 수 있습니다." -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary string " -"keys to values of type bool, bytes, float, int, or str. It can be used to " -"communicate arbitrary values back to the server." +"A dictionary that stores basic types (i.e. 
`int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." msgstr "" -"* **parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다. * " -"**num_examples** (*int*) -- 학습에 사용된 예제 수입니다. * **metrics** " -"(*Dict[str, Scalar]*) - 임의의 문자열 키를 bool, bytes, float, int,또는 str " -"타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 전달하는 데 사" -"용할 수 있습니다." - -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "**parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다." - -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." -msgstr "**num_examples** (*int*) - 트레이닝에 사용된 예제 수입니다." -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: flwr.common.record.metricsrecord.MetricsRecord:12 of msgid "" -"Configuration parameters requested by the server. This can be used to tell " -"the client which parameters are needed along with some Scalar attributes." +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -"서버에서 요청한 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 매개변" -"수가 필요한지 클라이언트에게 알려주는 데 사용할 수 있습니다." - -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "" -"**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "**parameters** -- 로컬 모델 파라미터를 NumPy 배열 목록으로 표시합니다." -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -"Configuration parameters requested by the server. This can be used to tell " -"the client which properties are needed along with some Scalar attributes." +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." msgstr "" -"서버에서 요청하는 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 속성" -"이 필요한지 클라이언트에게 알려주는 데 사용할 수 있습니다." -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of " -"type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -"**properties** -- 임의의 문자열 키를 bool, bytes, float, int 또는 str 타입의 " -"값에 매핑하는 dictionary입니다. 임의의 속성 값을 서버에 다시 전달하는 데 사용" -"할 수 있습니다." - -#: ../../source/ref-api/flwr.client.mod.rst:2 -msgid "mod" -msgstr "mod" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.common.record.metricsrecord.MetricsRecord:39 of msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. 
Similarly, only :code:`str` keys" +" are allowed." msgstr "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of -msgid "Client-side adaptive clipping modifier." -msgstr "클라이언트 측 적응형 클리핑 수정자." -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`fixedclipping_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -":py:obj:`fixedclipping_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of -msgid "Client-side fixed clipping modifier." -msgstr "클라이언트 측 고정 클리핑 수정자." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" -msgstr ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." -msgstr "." +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" -msgstr "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of -msgid "" -"Handle incoming message and return results, following the SecAgg protocol." -msgstr "SecAgg 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" -msgstr "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of -msgid "" -"Handle incoming message and return results, following the SecAgg+ protocol." -msgstr "SecAgg+ 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" -msgstr "" -":py:obj:`message_size_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of -msgid "Message size mod." -msgstr "메시지 크기 수정." 
+":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of -msgid "Parameters size mod." -msgstr "매개변수 크기 mod." +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -msgid "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\, " -"sensitivity\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\, " -"sensitivity\\, ...\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#: flwr.client.mod.localdp_mod.LocalDpMod:1 of -msgid "Modifier for local differential privacy." -msgstr "로컬 차분 프라이버시를 위한 수정자." - -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 -msgid "LocalDpMod" -msgstr "LocalDpMod" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.client.mod.localdp_mod.LocalDpMod:3 of -msgid "" -"This mod clips the client model updates and adds noise to the params before " -"sending them to the server." -msgstr "" -"이 모드는 클라이언트 모델 업데이트를 클립하고 서버로 보내기 전에 파라미터에 " -"노이즈를 추가합니다." +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" +msgstr "NDArray" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 -#: flwr.client.mod.localdp_mod.LocalDpMod:6 of -msgid "It operates on messages of type `MessageType.TRAIN`." -msgstr "이 함수는 `MessageType.TRAIN` 유형의 메시지에 대해 작동합니다." +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +#, fuzzy +msgid "NDArrays" +msgstr "NDArray" -#: flwr.client.mod.localdp_mod.LocalDpMod:8 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." -msgstr "클리핑 기준값입니다." +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" +msgstr ":py:obj:`tensors `\\" -#: flwr.client.mod.localdp_mod.LocalDpMod:10 of -msgid "The sensitivity of the client model." -msgstr "클라이언트 모델의 민감도입니다." +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" +msgstr ":py:obj:`tensor_type `\\" -#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" +msgstr "ParametersRecord" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of msgid "" -"The privacy budget. Smaller value of epsilon indicates a higher level of " -"privacy protection." +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -"개인정보 보호 예산. 엡실론 값이 작을수록 개인정보 보호 수준이 높음을 나타냅니" -"다." +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" -#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#, fuzzy msgid "" -"The failure probability. 
The probability that the privacy mechanism fails to " -"provide the desired level of privacy. A smaller value of delta indicates a " -"stricter privacy guarantee." +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -"실패 확률입니다. 프라이버시 메커니즘이 원하는 수준의 프라이버시를 제공하지 못" -"할 확률입니다. 델타 값이 작을수록 프라이버시가 더 엄격하게 보장된다는 의미입" -"니다." +"Arrays라는 이름의 데이터 클래스를 순서대로 저장합니다. 즉, OrderedDict[str, Array]로 항목을 보유합니다. " +"ParametersRecord 객체는 파이토치의 state_dict와 동등한 것으로 볼 수 있지만, 대신 직렬화된 텐서를 " +"보유합니다." -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of -msgid "" -"Create an instance of the local DP mod and add it to the client-side mods:" -msgstr "로컬 DP 모드의 인스턴스를 생성하고 클라이언트 측 모드에 추가합니다:" +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." +msgstr "" -#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" -msgstr "adaptiveclipping\\_mod" +#: flwr.common.record.parametersrecord.ParametersRecord:12 of +msgid "" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." +msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy wrapper." +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." msgstr "" -"이 모드는 서버 측 전략 래퍼인 차분 프라이버시 클라이언트 측 적응형 클리핑과 " -"함께 사용해야 합니다." -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of -msgid "The wrapper sends the clipping_norm value to the client." -msgstr "래퍼는 클라이언트에 clipping_norm 값을 전송합니다." +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +#, fuzzy +msgid "Let's see some examples:" +msgstr "몇 가지 예를 살펴보겠습니다:" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -"This mod clips the client model updates before sending them to the server." -msgstr "이 모드는 클라이언트 모델 업데이트를 서버로 보내기 전에 클립합니다." +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. 
For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" +msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -"It also sends KEY_NORM_BIT to the server for computing the new clipping " -"value." -msgstr "또한 새 클리핑 값을 계산하기 위해 서버로 KEY_NORM_BIT을 전송합니다." - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of -msgid "Notes" -msgstr "참고" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of -msgid "Consider the order of mods when using multiple." -msgstr "여러 개를 사용할 때는 모드의 순서를 고려하세요." +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" +msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -"Typically, adaptiveclipping_mod should be the last to operate on params." +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." msgstr "" -"일반적으로 adaptiveclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니" -"다." -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 -msgid "fixedclipping\\_mod" -msgstr "fixedclipping\\_mod" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of -msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." -msgstr "" -"이 모드는 서버 측 전략 래퍼인 DifferentialPrivacyClientSideFixedClipping과 함" -"께 사용해야 합니다." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." -msgstr "" -"일반적으로 fixedclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" -msgstr "make\\_ffn" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" -msgstr "message\\_size\\_mod" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." -msgstr "이 모드는 전송되는 메시지의 크기를 바이트 단위로 기록합니다." 
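Editor's note: the `ParametersRecord` docstrings added above describe serializing a NumPy array into a `flwr.common.Array`, storing it under a name, and later recovering the original NumPy representation. A minimal sketch of that round trip, assuming `ParametersRecord` and `array_from_numpy` are importable from `flwr.common` (as the API reference entries in this file suggest) and that dict-style item assignment works on the record (it is documented as a typed dict of `str` to `Array`):

    import numpy as np

    from flwr.common import ParametersRecord, array_from_numpy

    # Serialize a NumPy array into a flwr.common.Array and store it under a name.
    weights = np.random.randn(3, 3).astype(np.float32)
    record = ParametersRecord()
    record["layer-0.weights"] = array_from_numpy(weights)

    # Recover the original NumPy representation via Array.numpy(),
    # as described in the deserialization note above.
    restored = record["layer-0.weights"].numpy()
    assert np.allclose(weights, restored)

Per the docstring above, such a record could then travel inside a `common.Message` or be kept as persistent `ClientApp` state via its context.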
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 -msgid "parameters\\_size\\_mod" -msgstr "parameters\\_size\\_mod" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -"This mod logs the number of parameters transmitted in the message as well as " -"their size in bytes." -msgstr "" -"이 모드는 메시지에서 전송된 매개변수의 수와 그 크기를 바이트 단위로 기록합니" -"다." +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" -msgstr "secagg\\_mod" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 -msgid "secaggplus\\_mod" -msgstr "secaggplus\\_mod" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" -msgstr "run\\_client\\_app" +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "역직렬화에 필요한 직렬화된 객체의 메타데이터(예: NumPy 배열)에 해당하는 소량의 바이트도 이 카운팅에 포함될 수 있습니다." -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" -msgstr "run\\_supernode" +#: ../../source/ref-api/flwr.common.Properties.rst:2 +#, fuzzy +msgid "Properties" +msgstr "GetPropertiesRes" -#: ../../source/ref-api/flwr.client.start_client.rst:2 -msgid "start\\_client" -msgstr "start\\_client" +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" +msgstr "ReconnectIns" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of -msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be `\"[::]:8080\"`." -msgstr "" -"서버의 IPv4 또는 IPv6 주소입니다. Flower 서버가 포트 8080의 동일한 컴퓨터에" -"서 실행되는 경우 `서버_주소`는 `\"[::]:8080\"`이 됩니다." +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" +msgstr ":py:obj:`seconds `\\" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" -msgstr "클라이언트를 인스턴스화하는 호출 가능 항목입니다. (기본값: None)" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "RecordSet" -#: flwr.client.app.start_client:9 of +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -"An implementation of the abstract base class `flwr.client.Client` (default: " -"None)" -msgstr "추상 베이스 클래스 `flwr.client.Client`의 구현(기본값: None)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." 
+msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower " -"server. The default should be sufficient for most models. Users who train " -"very large models might need to increase this value. Note that the Flower " -"server needs to be started with the same value (see `flwr.server." -"start_server`), otherwise it will not know about the increased limit and " -"block larger messages." +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -"Flower 서버와 교환할 수 있는 gRPC 메시지의 최대 길이입니다. 기본값은 대부분" -"의 모델에 충분합니다. 매우 큰 모델을 훈련하는 사용자는 이 값을 늘려야 할 수" -"도 있습니다. Flower 서버는 동일한 값으로 시작해야 하며(`flwr.server." -"start_server` 참조), 그렇지 않으면 증가된 제한을 알지 못해 더 큰 메시지를 차" -"단합니다." -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established to " -"an SSL-enabled Flower server." +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." msgstr "" -"바이트 문자열 또는 경로 문자열로 PEM 인코딩된 루트 인증서. 제공하면 인증서를 " -"사용하여 SSL이 활성화된 Flower 서버에 보안 연결이 설정됩니다." -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection when " -"False, using system certificates if `root_certificates` is None." +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -"True일 경우 안전하지 않은 gRPC 연결을 시작합니다. root_certificates`가 None" -"인 경우 시스템 인증서를 사용하여 False일 때 HTTPS 연결을 활성화합니다." -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response (experimental) " -"- 'rest': HTTP (experimental)" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -"전송 계층을 구성합니다. 허용되는 값입니다: - 'grpc-bidi': gRPC, 양방향 스트리" -"밍 - 'grpc-rere': gRPC, 요청-응답(실험적) - 'rest': HTTP(실험적)" -#: flwr.client.app.start_client:31 of +#: flwr.common.record.recordset.RecordSet:29 of +#, fuzzy +msgid "Let's see an example." +msgstr "몇 가지 예를 살펴보겠습니다:" + +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is no " -"limit to the number of tries." +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. 
It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -"연결 오류 발생 시 클라이언트가 서버 연결을 포기하기 전에 시도하는 최대 횟수입" -"니다. None으로 설정하면 시도 횟수에 제한이 없습니다." -#: flwr.client.app.start_client:35 of +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -"The maximum duration before the client stops trying to connect to the server " -"in case of connection error. If set to None, there is no limit to the total " -"time." +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -"연결 오류 발생 시 클라이언트가 서버에 대한 연결을 시도하지 않는 최대 기간입니" -"다. None으로 설정하면 총 시간에는 제한이 없습니다." - -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" -msgstr "안전하지 않은 서버 연결로 gRPC 클라이언트 시작하기:" - -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "시스템 인증서를 사용하여 SSL 사용 gRPC 클라이언트를 시작합니다:" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" -msgstr "제공된 인증서를 사용하여 SSL 지원 gRPC 클라이언트를 시작합니다:" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr ":py:obj:`configs_records `\\" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -msgid "start\\_numpy\\_client" -msgstr "start\\_numpy\\_client" +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "Dictionary holding ConfigsRecord instances." -#: flwr.client.app.start_numpy_client:5 of -msgid "" -"This function is deprecated since 1.7.0. Use :code:`flwr.client." -"start_client` instead and first convert your :code:`NumPyClient` to type :" -"code:`flwr.client.Client` by executing its :code:`to_client()` method." -msgstr "" -"이 함수는 1.7.0부터 더 이상 사용되지 않습니다. 대신 :code:`flwr.client." -"start_client`를 사용하고 먼저 :code:`to_client()` 메서드를 실행하여 :code:" -"`NumPyClient`를 :code:`flwr.client.Client` 유형으로 변환합니다." +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr ":py:obj:`metrics_records `\\" -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." -msgstr "추상 베이스 클래스 `flwr.client.NumPyClient`의 구현입니다." +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "Dictionary holding MetricsRecord instances." -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "공통" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr ":py:obj:`parameters_records `\\" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -msgstr "" -":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "Dictionary holding ParametersRecord instances." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -msgid "Create Array from NumPy ndarray." -msgstr "NumPy에서 배열을 만듭니다." 
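Editor's note: the `RecordSet` docstrings added above describe a container for three record types (configs, metrics, parameters). A short sketch of constructing one, assuming the record classes and `array_from_numpy` are exported from `flwr.common` and that the constructor keywords match the property names listed in this section; the concrete keys and values are illustrative only:

    import numpy as np

    from flwr.common import (
        ConfigsRecord,
        MetricsRecord,
        ParametersRecord,
        RecordSet,
        array_from_numpy,
    )

    # Configs and metrics are plain typed dictionaries.
    train_config = ConfigsRecord({"lr": 0.01, "batch-size": 32})
    train_metrics = MetricsRecord({"accuracy": 0.94, "loss": 0.21})

    # Model arrays must first be serialized into flwr.common.Array objects.
    model_params = ParametersRecord(
        {"weights": array_from_numpy(np.random.randn(10, 10).astype(np.float32))}
    )

    # A RecordSet groups the three record types under user-chosen names.
    recordset = RecordSet(
        parameters_records={"model": model_params},
        metrics_records={"train-metrics": train_metrics},
        configs_records={"train-config": train_config},
    )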
+#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" +msgstr "ServerMessage" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -msgstr "" -":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" +msgstr ":py:obj:`evaluate_ins `\\" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." -msgstr "바이트에서 NumPy를 역직렬화합니다." +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" +msgstr ":py:obj:`fit_ins `\\" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +":py:obj:`get_parameters_ins " +"`\\" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." -msgstr "파일 및/또는 원격 로그 서버에 로깅을 구성합니다." - -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +":py:obj:`get_properties_ins " +"`\\" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." -msgstr "차단을 피하기 위해 create_event를 ThreadPoolExecutor에 제출합니다." +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" +msgstr "Status" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." -msgstr "정수 심각도 'level'과 함께 'msg % args'를 기록합니다." +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" +msgstr ":py:obj:`message `\\" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" -msgstr "" -":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "array\\_from\\_numpy" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "NumPy와 배열을 바이트열로 직렬화합니다." +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" +msgstr "bytes\\_to\\_ndarray" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" +msgstr "구성" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." -msgstr "표준 시간대를 UTC로 설정하여 time.time()에서 날짜 시간을 생성합니다." 
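Editor's note: the `ndarray_to_bytes` / `bytes_to_ndarray` entries above are simple (de)serialization helpers. A minimal round trip, under the assumption that both functions are importable directly from `flwr.common` as the reference entries indicate:

    import numpy as np

    from flwr.common import bytes_to_ndarray, ndarray_to_bytes

    original = np.arange(6, dtype=np.float32).reshape(2, 3)

    # Serialize the NumPy ndarray to bytes ...
    blob = ndarray_to_bytes(original)

    # ... and deserialize it back.
    restored = bytes_to_ndarray(blob)
    assert np.array_equal(original, restored)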
+#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" +msgstr "이벤트" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" +msgstr "로그" + +#: logging.Logger.log:3 of msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "예외 정보를 전달하려면 키워드 argument exc_info를 참 값과 함께 사용합니다." + +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" msgstr "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "NumPy 배열을 매개변수 객체로 변환합니다." +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." -msgstr "매개변수 객체를 NumPy 배열로 변환합니다." +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, data\\)" +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, data\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." -msgstr "배열 유형." +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "서버" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.rst:22::1 msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." -msgstr "" -"ClientMessage는 하나의 결과 메시지를 저장하는 데 사용되는 컨테이너입니다." +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "gRPC transport layer를 사용하여 Flower 서버를 실행하세요." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" -msgstr ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "클라이언트 상태 코드." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." +msgstr "Flower 클라이언트를 관리하기 위한 Abstract base class." 
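Editor's note: the companion pair documented above, `ndarrays_to_parameters` / `parameters_to_ndarrays`, converts between a list of NumPy ndarrays and the wire-level `Parameters` object. A sketch under the same assumption that both helpers are exported from `flwr.common`:

    import numpy as np

    from flwr.common import ndarrays_to_parameters, parameters_to_ndarrays

    ndarrays = [np.ones((2, 2), dtype=np.float32), np.zeros(4, dtype=np.float32)]

    # Convert NumPy ndarrays to a Parameters object (e.g. before sending them out) ...
    parameters = ndarrays_to_parameters(ndarrays)

    # ... and back to NumPy ndarrays (e.g. inside a strategy's aggregation step).
    recovered = parameters_to_ndarrays(parameters)
    assert all(np.array_equal(a, b) for a, b in zip(ndarrays, recovered))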
-#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`Driver `\\ \\(\\)" msgstr "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "Configs record." -msgstr "레코드를 설정합니다." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." +msgstr "Driver API를 위한 Abstract base Driver class." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." -msgstr "실행 상태." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." +msgstr "메트릭 콜렉션을 훈련 및 평가하기 위한 History class." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" -msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid "" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "클라이언트에서 서버로 연결 해제 메시지를 보냅니다." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "레거시 콘텍스트." -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, config\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, config\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." -msgstr "클라이언트에 대한 지침을 평가합니다." -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" msgstr "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." -msgstr "클라이언트의 응답을 평가합니다." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." -msgstr "원격 분석 이벤트의 유형." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." -msgstr "고객을 위한 맞춤 지침." 
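Editor's note: the `ServerApp` summary added above shows the constructor as `ServerApp([server, config, strategy, ...])`. A hedged sketch of the "existing Strategy" usage, where the exact keyword names and the choice of `FedAvg` with defaults are assumptions based only on the summaries in this section:

    from flwr.server import ServerApp, ServerConfig
    from flwr.server.strategy import FedAvg

    # A ServerApp built from an existing strategy and a basic run configuration.
    strategy = FedAvg()
    config = ServerConfig(num_rounds=3)

    app = ServerApp(config=config, strategy=strategy)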
+#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." +msgstr "Flower 서버." -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" msgstr "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "클라이언트의 적합성 응답." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." -msgstr "발생한 오류에 대한 정보를 저장하는 데이터 클래스입니다." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`GetParametersIns `\\ \\(config\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -":py:obj:`GetParametersIns `\\ \\(config\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." -msgstr "클라이언트에 대한 매개변수 요청입니다." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +msgid "Flower server config." +msgstr "Flower 서버 설정." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "매개변수 반환 요청 시 응답합니다." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." +msgstr "사용 가능한 클라이언트 그룹 제공." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr ":py:obj:`state `\\" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." msgstr "" -":py:obj:`GetPropertiesIns `\\ \\(config\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "클라이언트에 대한 속성 요청." +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr ":py:obj:`flwr.server `\\" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +msgid "Workflows." 
msgstr "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." -msgstr "클라이언트의 속성 응답을 확인합니다." +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." -msgstr "애플리케이션을 사용하는 엔티티의 관점에서 애플리케이션의 상태입니다." +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" -msgstr ":py:obj:`MessageType `\\ \\(\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." -msgstr "메시지 타입." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." -msgstr "레거시 메시지 타입." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, message\\_id\\, " -"src\\_node\\_id\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -":py:obj:`Metadata `\\ \\(run\\_id\\, message\\_id\\, " -"src\\_node\\_id\\, ...\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." -msgstr "현재 메시지와 관련된 메타데이터를 보유한 데이터 클래스입니다." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." 
+msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." +msgstr "Flower ClientProxy 인스턴스 등록 해제." -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." -msgstr "메트릭 기록." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." +msgstr "적어도 1개의 `num_clients` 가 사용 가능해질 때까지 기다리세요." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" -msgstr ":py:obj:`NDArray `\\" +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, :py:class:" -"`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, :py:class:" -"`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." msgstr "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." -msgstr "모델 매개변수." +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "Parameters record." -msgstr "매개변수 기록." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." 
+msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "서버에서 클라이언트로 메시지를 다시 연결합니다." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." -msgstr "RecordSet은 매개변수, 메트릭 및 설정 그룹을 저장합니다." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." msgstr "" -"ServerMessage는 하나의 instruction 메시지를 저장하는 데 사용되는 컨테이너입니" -"다." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "클라이언트 상태." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" -msgstr "배열" +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid ":py:obj:`run `\\" +msgstr ":py:obj:`flwr.server `\\" -#: flwr.common.record.parametersrecord.Array:3 of -msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." -msgstr "" -"배열형 또는 텐서형 객체의 직렬화된 데이터와 그에 대한 일부 메타데이터를 포함" -"하는 데이터 클래스입니다." +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid "Run information." +msgstr "시뮬레이션 실행" -#: flwr.common.record.parametersrecord.Array:6 of +#: flwr.server.driver.driver.Driver.create_message:3 of msgid "" -"A string representing the data type of the serialised object (e.g. `np." 
-"float32`)" -msgstr "직렬화된 객체의 데이터 유형을 나타내는 문자열(예: `np.float32`)" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of +#: flwr.server.driver.driver.Driver.create_message:6 of msgid "" -"A list representing the shape of the unserialized array-like object. This is " -"used to deserialize the data (depending on the serialization method) or " -"simply as a metadata field." +"The content for the new message. This holds records that are to be sent " +"to the destination node." msgstr "" -"직렬화되지 않은 배열과 같은 객체의 모양을 나타내는 목록입니다. 직렬화 방법에 " -"따라 데이터를 역직렬화하는 데 사용되거나 단순히 메타데이터 필드로 사용됩니다." -#: flwr.common.record.parametersrecord.Array:12 of +#: flwr.server.driver.driver.Driver.create_message:9 of msgid "" -"A string indicating the type of serialisation mechanism used to generate the " -"bytes in `data` from an array-like or tensor-like object." +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -"배열형 또는 텐서형 객체에서 `데이터`의 바이트를 생성하는 데 사용되는 직렬화 " -"메커니즘의 유형을 나타내는 문자열입니다." -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." -msgstr "데이터를 포함하는 바이트 버퍼입니다." +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr ":py:obj:`numpy `\\ \\(\\)" +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of -msgid "Return the array as a NumPy array." -msgstr "배열을 NumPy 배열로 반환합니다." +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." +msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" -msgstr ":py:obj:`dtype `\\" +#: flwr.server.driver.driver.Driver.create_message:23 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`shape `\\" -msgstr ":py:obj:`shape `\\" +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`stype `\\" -msgstr ":py:obj:`stype `\\" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" -msgstr ":py:obj:`data `\\" +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." 
+msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -msgid "ClientMessage" -msgstr "클라이언트 메시지" +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" -msgstr ":py:obj:`evaluate_res `\\" +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" -msgstr ":py:obj:`fit_res `\\" +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: flwr.server.driver.driver.Driver.send_and_receive:3 of msgid "" -":py:obj:`get_parameters_res `\\" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -":py:obj:`get_parameters_res `\\" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: flwr.server.driver.driver.Driver.send_and_receive:9 of msgid "" -":py:obj:`get_properties_res `\\" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." msgstr "" -":py:obj:`get_properties_res `\\" -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" -msgstr "코드" +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`~enum.Enum`" +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." +msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" -msgstr ":py:obj:`OK `\\" +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED `\\" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED `\\" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." 
+msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED `\\" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED `\\" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED `\\" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED `\\" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 -msgid "ConfigsRecord" -msgstr "컨피그 레코드" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." +msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" -"`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` | :py:class:" -"`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ [:py:class:`int`] | :" -"py:class:`~typing.List`\\ [:py:class:`float`] | :py:class:`~typing.List`\\ [:" -"py:class:`str`] | :py:class:`~typing.List`\\ [:py:class:`bytes`] | :py:class:" -"`~typing.List`\\ [:py:class:`bool`]]" -msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:class:`int` | :py:class:`float` | :py:class:`str` | :py:class:`bytes` |" -" :py:class:`bool` | :py:class:`~typing.List`\\ [:py:class:`int`] | " -":py:class:`~typing.List`\\ [:py:class:`float`] | :py:class:`~typing.List`\\ " -"[:py:class:`str`] | :py:class:`~typing.List`\\ [:py:class:`bytes`] | " -":py:class:`~typing.List`\\ [:py:class:`bool`]]" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." -msgstr "R에서 모든 항목을 제거합니다." +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." +msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." -msgstr "이 객체에 저장된 바이트 수를 반환합니다." 
+#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." -msgstr "d는 기본값이 None입니다." +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`config `\\" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`strategy `\\" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "" -"If key is not found, d is returned if given, otherwise KeyError is raised." -msgstr "키를 찾을 수 없으면 주어진 경우 d가 반환되고, 그렇지 않으면 KeyError가 " -"발생합니다." +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`history `\\" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." -msgstr "dict/iterable E 및 F에서 R을 업데이트합니다." +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." -msgstr "이 함수는 booleans을 1바이트를 차지하는 것으로 계산합니다." +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`state `\\" +msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" -msgstr "컨텍스트" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.context.Context:3 of -msgid "" -"Holds records added by the entity in a given run and that will stay local. " -"This means that the data it holds will never leave the system it's running " -"from. This can be used as an intermediate storage or scratchpad when " -"executing mods. 
It can also be used as a memory to access at different " -"points during the lifecycle of this entity (e.g. across multiple rounds)" +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" msgstr "" -"특정 실행에서 엔티티가 추가한 레코드를 보유하며 로컬에 유지됩니다. 즉, " -"저장된 데이터는 실행 중인 시스템을 벗어나지 않습니다. 모드를 실행할 때 중간 " -"저장소나 스크래치 패드로 사용할 수 있습니다. 또한 이 엔티티의 수명 주기 동안 " -"다른 시점에서 액세스하기 위한 메모리로도 사용할 수 있습니다(예: 여러 " -"라운드에 걸쳐)" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -msgid ":py:obj:`state `\\" -msgstr ":py:obj:`state `\\" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" -msgstr "연결 해제" +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." +msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" -msgstr "오류" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." +msgstr "" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." -msgstr "오류 식별자입니다." +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" +msgstr "" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" -msgstr "오류가 발생한 이유(예: 예외 스택 추적)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." +msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgstr "" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." -msgstr "오류 코드." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." +msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" +msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." -msgstr "오류에 대해 보고된 사유입니다." +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -msgid "EvaluateIns" -msgstr "평가" +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" -msgstr "EvaluateRes" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" -msgstr ":py:obj:`loss `\\" +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" -msgstr "이벤트 타입" +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." +msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +msgid "ServerAppComponents" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.serverapp_components.ServerAppComponents:3 of msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." msgstr "" -":py:obj:`encode `\\ \\(\\[encoding\\, errors\\]" -"\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." -msgstr "인코딩용으로 등록된 코덱을 사용하여 문자열을 인코딩합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." -msgstr "이전 하위 문자열이 모두 새 하위 문자열로 바뀐 사본을 반환합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.serverapp_components.ServerAppComponents:9 of msgid "" -":py:obj:`split `\\ \\(\\[sep\\, maxsplit\\]\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." 
msgstr "" -":py:obj:`split `\\ \\(\\[sep\\, maxsplit\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of msgid "" -"Return a list of the substrings in the string, using sep as the separator " -"string." -msgstr "sep를 구분 문자열로 사용하여 문자열의 하위 문자열 목록을 반환합니다." +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, maxsplit\\]\\)" +":py:obj:`client_manager " +"`\\" msgstr "" -":py:obj:`rsplit `\\ \\(\\[sep\\, maxsplit\\]\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr ":py:obj:`join `\\ \\(iterable\\, \\/\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." -msgstr "원하는 수의 문자열을 연결합니다." +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr ":py:obj:`capitalize `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." -msgstr "대문자로 된 문자열을 반환합니다." +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`server `\\" +msgstr ":py:obj:`flwr.server `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr ":py:obj:`casefold `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr ":py:obj:`state `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." -msgstr "대소문자 구분 없는 비교에 적합한 문자열을 반환합니다." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`title `\\ \\(\\)" -msgstr ":py:obj:`title `\\ \\(\\)" +#: flwr.server.server_config.ServerConfig:3 of +msgid "" +"All attributes have default values which allows users to configure just " +"the ones they care about." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." -msgstr "각 단어의 제목이 대소문자로 구분된 문자열을 반환합니다." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -":py:obj:`center `\\ \\(width\\[\\, fillchar\\]" -"\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." -msgstr "길이 너비의 가운데 문자열을 반환합니다." 
+#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the number of non-overlapping occurrences of substring sub in string " -"S[start:end]." -msgstr "문자열 S[start:end]에서 하위 문자열 sub이 겹치지 않는 횟수를 반환합니다." +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`expandtabs `\\ \\(\\[tabsize\\]\\)" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -":py:obj:`expandtabs `\\ \\(\\[tabsize\\]\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." -msgstr "모든 탭 문자가 공백을 사용하여 확장된 사본을 반환합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, end\\]" -"\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -"Return the lowest index in S where substring sub is found, such that sub is " -"contained within S[start:end]." -msgstr "하위 문자열 sub이 발견되는 S에서 하위가 S[start:end] 내에 포함되는 가장 낮은 " -"인덱스를 반환합니다." +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`partition `\\ \\(sep\\, \\/\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -":py:obj:`partition `\\ \\(sep\\, \\/\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." -msgstr "지정된 구분 기호를 사용하여 문자열을 세 부분으로 분할합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, fillchar\\]\\)" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -":py:obj:`ljust `\\ \\(width\\[\\, fillchar\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." -msgstr "왼쪽으로 정렬된 길이의 문자열을 반환합니다." +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lower `\\ \\(\\)" -msgstr ":py:obj:`lower `\\ \\(\\)" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." -msgstr "소문자로 변환된 문자열 사본을 반환합니다." +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." -msgstr "선행 공백이 제거된 문자열의 복사본을 반환합니다." +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:12 of msgid "" -"Return the highest index in S where substring sub is found, such that sub is " -"contained within S[start:end]." -msgstr "부분 문자열 sub이 발견되는 곳에서 sub이 S[start:end] 내에 포함되도록 S에서 " -"가장 높은 인덱스를 반환합니다." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -":py:obj:`rindex `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:21 of msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, fillchar\\]\\)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -":py:obj:`rjust `\\ \\(width\\[\\, fillchar\\]\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." -msgstr "길이 너비의 오른쪽 정렬된 문자열을 반환합니다." 
- -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." -msgstr "후행 공백이 제거된 문자열의 복사본을 반환합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -":py:obj:`splitlines `\\ \\(\\[keepends\\]\\" -")" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." -msgstr "문자열의 줄 목록을 줄 경계에서 구분하여 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "" -"Return a copy of the string with leading and trailing whitespace removed." -msgstr "선행 및 후행 공백이 제거된 문자열 사본을 반환합니다." +#: flwr.server.app.start_server:32 of +msgid "CA certificate." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr ":py:obj:`swapcase `\\ \\(\\)" +#: flwr.server.app.start_server:33 of +msgid "server certificate." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of -msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." -msgstr "대문자를 소문자로, 소문자를 대문자로 변환합니다." +#: flwr.server.app.start_server:34 of +msgid "server private key." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`translate `\\ \\(table\\, \\/\\)" +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -":py:obj:`translate `\\ \\(table\\, \\/\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." -msgstr "주어진 번역 테이블을 사용하여 문자열의 각 문자를 바꿉니다." +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr ":py:obj:`upper `\\ \\(\\)" +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." -msgstr "Return a copy of the string converted to uppercase." 
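The `start_server` msgids above describe both the insecure and the SSL-enabled variants, with the certificates passed as a (CA certificate, server certificate, server private key) tuple of bytes. A hedged sketch in which the certificate file paths are placeholders:

    # Sketch of flwr.server.start_server per the entries above; the
    # certificate file paths are illustrative placeholders.
    from pathlib import Path

    import flwr as fl
    from flwr.server import ServerConfig
    from flwr.server.strategy import FedAvg

    # Starting an insecure server; returns a history ("hist") object
    # containing training and evaluation metrics.
    hist = fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=ServerConfig(num_rounds=3),
        strategy=FedAvg(),
    )

    # Starting an SSL-enabled server: CA certificate, server certificate,
    # and server private key, in that order, as bytes.
    hist = fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=ServerConfig(num_rounds=3),
        certificates=(
            Path("certificates/ca.crt").read_bytes(),
            Path("certificates/server.pem").read_bytes(),
            Path("certificates/server.key").read_bytes(),
        ),
    )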
+#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\, " -"start\\[\\, end\\]\\]\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -":py:obj:`startswith `\\ \\(prefix\\[\\, " -"start\\[\\, end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." -msgstr "S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -":py:obj:`endswith `\\ \\(suffix\\[\\, start\\" -"[\\, end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." -msgstr "S가 지정된 접미사로 끝나면 True를 반환하고 그렇지 않으면 False을 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`removeprefix `\\ \\(prefix\\, " -"\\/\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -":py:obj:`removeprefix `\\ \\(prefix\\, \\" -"/\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." -msgstr "주어진 접두사 문자열이 있는 경우 제거된 문자열을 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`removesuffix `\\ \\(suffix\\, " -"\\/\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -":py:obj:`removesuffix `\\ \\(suffix\\, \\" -"/\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." -msgstr "주어진 접미사 문자열이 있는 경우 제거된 문자열을 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr ":py:obj:`isascii `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." -msgstr "문자열의 모든 문자가 ASCII인 경우 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr ":py:obj:`islower `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." 
-msgstr "문자열이 소문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr ":py:obj:`isupper `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." -msgstr "문자열이 대문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr ":py:obj:`istitle `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." -msgstr "문자열이 제목 대/소문자가 구분된 문자열이면 True를 반환하고, 그렇지 않으면 " -"False를 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr ":py:obj:`isspace `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." -msgstr "문자열이 공백 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdecimal `\\ \\(\\)" -msgstr ":py:obj:`isdecimal `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." -msgstr "문자열이 10진수 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdigit `\\ \\(\\)" -msgstr ":py:obj:`isdigit `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." -msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isnumeric `\\ \\(\\)" -msgstr ":py:obj:`isnumeric `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." -msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalpha `\\ \\(\\)" -msgstr ":py:obj:`isalpha `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." -msgstr "문자열이 알파벳 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr ":py:obj:`isalnum `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." -msgstr "문자열이 영-숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isidentifier `\\ \\(\\)" -msgstr ":py:obj:`isidentifier `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Return True if the string is a valid Python identifier, False otherwise." -msgstr "문자열이 유효한 파이썬 식별자인 경우 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr ":py:obj:`isprintable `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." -msgstr "문자열을 인쇄할 수 있으면 True를 반환하고, 그렇지 않으면 False를 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given " -"width." -msgstr "숫자 문자열을 왼쪽에 0으로 채워서 지정된 너비의 필드를 채웁니다." +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`format `\\ \\(\\*args\\, \\*\\*" -"kwargs\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Return a formatted version of S, using substitutions from args and kwargs." -msgstr "args와 kwarg의 치환을 사용하여 형식이 지정된 S를 반환합니다." +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr ":py:obj:`format_map `\\ \\(mapping\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." -msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`maketrans `\\" -msgstr ":py:obj:`maketrans `\\" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." -msgstr "str.translate()에 사용할 수 있는 번역 테이블을 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" -msgstr ":py:obj:`PING `\\" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`START_CLIENT_ENTER `\\" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`START_CLIENT_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`START_CLIENT_LEAVE `\\" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -":py:obj:`START_CLIENT_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`START_SERVER_ENTER `\\" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -":py:obj:`START_SERVER_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`START_SERVER_LEAVE `\\" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -":py:obj:`START_SERVER_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`RUN_DRIVER_API_ENTER `\\" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`RUN_DRIVER_API_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE `\\" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`RUN_DRIVER_API_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`RUN_FLEET_API_ENTER `\\" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." msgstr "" -":py:obj:`RUN_FLEET_API_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_LEAVE `\\" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." msgstr "" -":py:obj:`RUN_FLEET_API_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_ENTER `\\" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE `\\" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." 
msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_ENTER `\\" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." msgstr "" -":py:obj:`START_SIMULATION_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_LEAVE `\\" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." msgstr "" -":py:obj:`START_SIMULATION_LEAVE `\\" - -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr ":py:obj:`DRIVER_CONNECT `\\" - -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr ":py:obj:`DRIVER_DISCONNECT `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_DRIVER_ENTER `\\" +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." msgstr "" -":py:obj:`START_DRIVER_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_DRIVER_LEAVE `\\" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." msgstr "" -":py:obj:`START_DRIVER_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER `\\" +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." 
msgstr "" -":py:obj:`RUN_CLIENT_APP_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE `\\" +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." msgstr "" -":py:obj:`RUN_CLIENT_APP_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER `\\" +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." msgstr "" -":py:obj:`RUN_SERVER_APP_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE `\\" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -":py:obj:`RUN_SERVER_APP_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERNODE_ENTER `\\" +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" msgstr "" -":py:obj:`RUN_SUPERNODE_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE `\\" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -":py:obj:`RUN_SUPERNODE_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." msgstr "" -":py:obj:`RUN_SUPEREXEC_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE `\\" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`RUN_SUPEREXEC_LEAVE `\\" - -#: flwr.common.EventType.capitalize:3 of -msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." -msgstr "보다 구체적으로, 첫 번째 문자는 대문자로, 나머지는 소문자로 만듭니다." 
- -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "" -"Padding is done using the specified fill character (default is a space)." -msgstr "패딩은 지정된 채우기 문자를 사용하여 수행됩니다(기본값은 공백)." -#: flwr.common.EventType.count:1 of -msgid "" -"Return the number of non-overlapping occurrences of substring sub in string " -"S[start:end]. Optional arguments start and end are interpreted as in slice " -"notation." +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." msgstr "" -"문자열 S[start:end]에서 부분 문자열 sub의 겹치지 않는 횟수를 반환합니다. " -"선택적 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다." - -#: flwr.common.EventType.encode:3 of -msgid "encoding" -msgstr "인코딩" - -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." -msgstr "문자열을 인코딩합니다." - -#: flwr.common.EventType.encode:9 of -msgid "errors" -msgstr "오류" -#: flwr.common.EventType.encode:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as " -"any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"인코딩 오류에 사용할 오류 처리 방식입니다. 기본값은 'strict'로, 인코딩 " -"오류가 발생하면 UnicodeEncodeError를 발생시킵니다. 다른 가능한 값으로는 " -"'ignore', 'replace', 'xmlcharrefreplace', 그리고 UnicodeEncodeError를 처리할 " -"수 있는 codecs.register_error에 등록된 다른 이름도 사용할 수 있습니다." -#: flwr.common.EventType.endswith:1 of -msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, stop " -"comparing S at that position. suffix can also be a tuple of strings to try." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." msgstr "" -"S가 지정된 접미사로 끝나면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다. 시작 옵션을 사용하면 해당 위치부터 S를 테스트합니다. end 옵션을 " -"사용하면 해당 위치에서 S 비교를 중지합니다. 접미사는 시도할 문자열의 튜플일 " -"수도 있습니다." - -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." -msgstr "탭 크기를 지정하지 않으면 크기가 8로 지정됩니다." -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Return the lowest index in S where substring sub is found, such that sub is " -"contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"부분 문자열 sub가 발견되는 곳의 가장 낮은 인덱스를 반환하며, sub는 " -"S[start:end] 내에 포함되어야 합니다. 선택적 인자 start와 end는 슬라이스 " -"표기법과 같이 해석됩니다." - -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." -msgstr "실패 시 -1을 반환합니다." - -#: flwr.common.EventType.format:1 of -msgid "" -"Return a formatted version of S, using substitutions from args and kwargs. " -"The substitutions are identified by braces ('{' and '}')." -msgstr "args와 kwargs의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 " -"중괄호('{' 및 '}')로 식별됩니다." - -#: flwr.common.EventType.format_map:1 of -msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." -msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 " -"'}')로 식별됩니다." - -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." -msgstr "부분 문자열을 찾을 수 없을 때 ValueError를 발생시킵니다." 
-#: flwr.common.EventType.isalnum:3 of -msgid "" -"A string is alpha-numeric if all characters in the string are alpha-numeric " -"and there is at least one character in the string." -msgstr "문자열의 모든 문자가 영숫자이고 문자열에 하나 이상의 문자가 있는 경우 " -"문자열은 영-숫자입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." +msgstr "" -#: flwr.common.EventType.isalpha:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is alphabetic if all characters in the string are alphabetic and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 알파벳이고 문자열에 하나 이상의 문자가 있는 경우 " -"문자열은 알파벳입니다." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.EventType.isascii:3 of -msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty string " -"is ASCII too." -msgstr "ASCII 문자는 U+0000-U+007F 범위의 코드 포인트가 있습니다. 빈 문자열도 " -"ASCII입니다." 
+#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "" -#: flwr.common.EventType.isdecimal:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is a decimal string if all characters in the string are decimal and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 10진수이고 문자열에 하나 이상의 문자가 있는 경우 " -"문자열은 10진수 문자열입니다." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.EventType.isdigit:3 of -msgid "" -"A string is a digit string if all characters in the string are digits and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 " -"숫자 문자열입니다." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." +msgstr "" -#: flwr.common.EventType.isidentifier:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved identifier, " -"such as \"def\" or \"class\"." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"keyword.iskeyword(s)를 호출하여 문자열 s가 \"def\" 또는 \"class\"와 같은 " -"예약 식별자인지 테스트합니다." -#: flwr.common.EventType.islower:3 of -msgid "" -"A string is lowercase if all cased characters in the string are lowercase " -"and there is at least one cased character in the string." -msgstr "문자열이 모두 소문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 " -"소문자입니다." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." 
+msgstr "" -#: flwr.common.EventType.isnumeric:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is numeric if all characters in the string are numeric and there is " -"at least one character in the string." -msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 " -"숫자입니다." +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.EventType.isprintable:3 of -msgid "" -"A string is printable if all of its characters are considered printable in " -"repr() or if it is empty." -msgstr "문자열은 repr()에서 모든 문자가 인쇄 가능한 것으로 간주되거나 비어 있는 경우 " -"인쇄할 수 있습니다." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." +msgstr "" -#: flwr.common.EventType.isspace:3 of -msgid "" -"A string is whitespace if all characters in the string are whitespace and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 공백이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 " -"공백입니다." +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "" -#: flwr.common.EventType.istitle:3 of -msgid "" -"In a title-cased string, upper- and title-case characters may only follow " -"uncased characters and lowercase characters only cased ones." -msgstr "제목 대/소문자 문자열에서 대문자와 제목 대문자는 대소문자만, 소문자는 " -"대문자만 뒤에 올 수 있습니다." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "" -#: flwr.common.EventType.isupper:3 of -msgid "" -"A string is uppercase if all cased characters in the string are uppercase " -"and there is at least one cased character in the string." -msgstr "문자열의 모든 문자가 대문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 " -"대문자입니다." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." +msgstr "" -#: flwr.common.EventType.join:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"The string whose method is called is inserted in between each given string. " -"The result is returned as a new string." -msgstr "메서드가 호출되는 문자열은 주어진 각 문자열 사이에 삽입됩니다. 결과는 새 " -"문자열로 반환됩니다." - -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -msgstr "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." -msgstr "None이 아닌 문자가 지정되면 대신 문자열에서 문자를 제거합니다." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." +msgstr "" -#: flwr.common.EventType.maketrans:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the same " -"position in y. If there is a third argument, it must be a string, whose " -"characters will be mapped to None in the result." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"argument이 하나만 있는 경우, 유니코드 서수(정수) 또는 문자를 유니코드 서수, " -"문자열 또는 None에 매핑하는 dictionary이어야 합니다. 그러면 문자 키가 서수로 " -"변환됩니다. 인수가 두 개이면 길이가 같은 문자열이어야 하며, 결과 " -"dictionary에서 x의 각 문자는 y의 같은 위치에 있는 문자에 매핑됩니다. 세 번째 " -"인수가 있으면 문자열이어야 하며, 그 문자는 결과에서 None에 매핑됩니다." -#: flwr.common.EventType.partition:3 of -msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." msgstr "" -"문자열에서 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, " -"구분 기호 자체, 구분 기호 뒤 부분을 포함하는 3-tuple을 반환합니다." -#: flwr.common.EventType.partition:7 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." -msgstr "구분 기호를 찾을 수 없으면 원래 문자열과 빈 문자열 2개를 포함하는 3-튜플을 " -"반환합니다." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.EventType.removeprefix:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"If the string starts with the prefix string, return string[len(prefix):]. " -"Otherwise, return a copy of the original string." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"문자열이 접두사 문자열로 시작하면 문자열[len(prefix):]을 반환합니다. 그렇지 " -"않으면 원본 문자열의 복사본을 반환합니다." -#: flwr.common.EventType.removesuffix:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"문자열이 접미사 문자열로 끝나고 해당 접미사가 비어 있지 않으면 " -"문자열[:-len(suffix)]을 반환합니다. 그렇지 않으면 원본 문자열의 복사본을 " -"반환합니다." -#: flwr.common.EventType.replace:5 of -msgid "count" -msgstr "카운트" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." +msgstr "" -#: flwr.common.EventType.replace:4 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." -msgstr "대체할 최대 발생 횟수입니다. -1(기본값)은 모든 항목을 교체한다는 의미입니다." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.EventType.replace:7 of -msgid "" -"If the optional argument count is given, only the first count occurrences " -"are replaced." -msgstr "선택적 argument 개수를 지정하면 첫 번째 개수만 바뀝니다." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." +msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of -msgid "" -"Return the highest index in S where substring sub is found, such that sub is " -"contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." msgstr "" -"부분 문자열 sub가 발견되는 곳의 가장 높은 인덱스를 반환하며, sub는 " -"S[start:end] 내에 포함되어야 합니다. 선택적 인자 start와 end는 슬라이스 " -"표기법과 같이 해석됩니다." -#: flwr.common.EventType.rpartition:3 of -msgid "" -"This will search for the separator in the string, starting at the end. If " -"the separator is found, returns a 3-tuple containing the part before the " -"separator, the separator itself, and the part after it." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -"그러면 문자열에서 끝 부분부터 시작하여 구분 기호를 검색합니다. 구분 기호가 " -"발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 기호 뒤 부분을 포함하는 3-" -"tuple을 반환합니다." -#: flwr.common.EventType.rpartition:7 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." -msgstr "구분 기호를 찾을 수 없는 경우 빈 문자열 2개와 원래 문자열을 포함하는 3-" -"tuple을 반환합니다." +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
+msgstr "" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" -msgstr "sep" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." -msgstr "문자열을 분할하는 데 사용되는 구분 기호입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"When set to None (the default value), will split on any whitespace character " -"(including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard empty " -"strings from the result." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"None(기본값)으로 설정하면 모든 공백 문자(\\\\n" -" \\\\r \\\\t \\\\f 및 공백 포함)를 분할하고 결과에서 빈 문자열을 삭제합니다." -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" -msgstr "maxsplit" - -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Maximum number of splits (starting from the left). -1 (the default value) " -"means no limit." -msgstr "최대 분할 횟수(왼쪽부터 시작). -1(기본값)은 제한이 없음을 의미합니다." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." -msgstr "분할은 문자열 끝에서 시작하여 앞쪽으로 진행됩니다." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "" -#: flwr.common.EventType.split:13 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using the " -"regular expression module." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"참고로 str.split()은 주로 의도적으로 구분된 데이터에 유용합니다. 구두점이 " -"포함된 자연 텍스트의 경우 정규식 모듈을 사용하는 것이 좋습니다." -#: flwr.common.EventType.splitlines:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Line breaks are not included in the resulting list unless keepends is given " -"and true." -msgstr "줄 바꿈은 keepends가 주어지고 참이 아니면 결과 목록에 포함되지 않습니다." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.EventType.startswith:1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"Return True if S starts with the specified prefix, False otherwise. 
With " -"optional start, test S beginning at that position. With optional end, stop " -"comparing S at that position. prefix can also be a tuple of strings to try." +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -"S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다. 시작 옵션을 사용하면 해당 위치에서 시작되는 S를 테스트합니다. " -"선택적 end를 사용하면 해당 위치에서 S 비교를 중지합니다. 접두사는 시도할 " -"문자열의 튜플일 수도 있습니다." -#: flwr.common.EventType.title:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"More specifically, words start with uppercased characters and all remaining " -"cased characters have lower case." -msgstr "보다 구체적으로, 단어는 대문자로 시작하고 나머지 모든 대소문자는 소문자로 " -"표기합니다." - -#: flwr.common.EventType.translate:5 of -msgid "table" -msgstr "table" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.EventType.translate:4 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode " -"ordinals, strings, or None." -msgstr "유니코드 서수를 유니코드 서수, 문자열 또는 없음으로 매핑하는 번역 " -"테이블이어야 합니다." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.EventType.translate:7 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character is " -"left untouched. Characters mapped to None are deleted." +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -"테이블은 사전이나 목록과 같이 __getitem__을 통해 조회/색인을 구현해야 " -"합니다. 이 작업에서 LookupError가 발생하면 문자는 그대로 유지됩니다. " -"없음으로 매핑된 문자는 삭제됩니다." - -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." -msgstr "문자열은 잘리지 않습니다." 
- -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" -msgstr "FitIns" - -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" - -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" - -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" -msgstr "FitRes" - -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" - -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" - -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" - -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" - -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -msgid "GetParametersIns" -msgstr "GetParametersIns" - -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" - -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 -msgid "GetParametersRes" -msgstr "GetParametersRes" - -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" - -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" -msgstr "GetPropertiesIns" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" -msgstr "GetPropertiesRes" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
+msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" -msgstr ":py:obj:`properties `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 -msgid "Message" -msgstr "Message" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." +msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." -msgstr "실행할 메시지에 대한 정보를 포함한 데이터 클래스입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" -#: flwr.common.message.Message:5 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side " -"logic to a client, or vice-versa) or that will be sent to it." -msgstr "다른 엔터티(예: 서버 측 로직이 클라이언트로 전송하거나 그 반대로 전송하는 등)" -"가 전송했거나 전송할 레코드를 보유합니다." +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "" -#: flwr.common.message.Message:8 of -msgid "" -"A dataclass that captures information about an error that took place when " -"processing another message." -msgstr "다른 메시지를 처리할 때 발생한 오류에 대한 정보를 캡처하는 데이터 " -"클래스입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -":py:obj:`create_error_reply `\\ \\(" -"error\\[\\, ttl\\]\\)" - -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." -msgstr "오류가 발생했음을 나타내는 답장 메시지를 작성합니다." 
-#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of msgid "" -":py:obj:`create_reply `\\ \\(content\\[\\, " -"ttl\\]\\)" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -":py:obj:`create_reply `\\ \\(content\\[\\, " -"ttl\\]\\)" - -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." -msgstr "지정된 콘텐츠와 TTL을 사용하여 이 메시지에 대한 답글을 작성합니다." - -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" -msgstr ":py:obj:`has_content `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." -msgstr "메시지에 콘텐츠가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" -msgstr ":py:obj:`has_error `\\ \\(\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." -msgstr "메시지에 오류가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" -msgstr ":py:obj:`content `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of -msgid "The content of this message." -msgstr "이 메시지의 내용입니다." +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" -msgstr ":py:obj:`error `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." 
+msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." -msgstr "이 메시지가 캡처한 오류입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" -msgstr ":py:obj:`metadata `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." -msgstr "오류가 발생했습니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based on " -"the remaining time for the received message before it expires. This follows " -"the equation: ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta." -"created_at)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 " -"전까지 남은 시간을 기준으로 설정됩니다. 이는 다음과 같은 공식을 따릅니다: " -"ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of -msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based on " -"the remaining time for the received message before it expires. This follows " -"the equation:" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" msgstr "" -"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 " -"전까지 남은 시간을 기준으로 설정됩니다. 이는 다음 공식을 따릅니다:" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." +msgstr "" -#: flwr.common.message.Message.create_reply:3 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"The method generates a new `Message` as a reply to this message. It inherits " -"'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from this message " -"and sets 'reply_to_message' to the ID of this message." +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -"이 메서드는 이 메시지에 대한 응답으로 새로운 '메시지'를 생성합니다. 이 " -"메시지에서 'run_id', 'src_node_id', 'dst_node_id', 'message_type'을 상속하고 " -"'reply_to_message'를 이 메시지의 ID로 설정합니다." 
-#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." -msgstr "답장 메시지의 콘텐츠입니다." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." +msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." -msgstr "답장을 나타내는 새로운 `메시지` 인스턴스입니다." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of +msgid "" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" -msgstr "MessageType" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" -msgstr ":py:obj:`EVALUATE `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" -msgstr ":py:obj:`QUERY `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" -msgstr ":py:obj:`TRAIN `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." +msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" -msgstr "MessageTypeLegacy" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`GET_PARAMETERS `\\" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`GET_PARAMETERS `\\" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`GET_PROPERTIES `\\" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`GET_PROPERTIES `\\" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." -msgstr "현재 실행에 대한 식별자입니다." 
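Editorial aside (not part of the diff): the fixed-clipping counterpart documented above follows the same pattern, with `fixedclipping_mod` on the client. A sketch under the same assumptions (keyword names inferred from the documented parameters, `client_fn` a placeholder defined elsewhere):

    # Illustrative only: client-side fixed clipping.
    from flwr.client import ClientApp
    from flwr.client.mod import fixedclipping_mod
    from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping, FedAvg

    dp_strategy = DifferentialPrivacyClientSideFixedClipping(
        FedAvg(),
        noise_multiplier=1.0,    # >= 1.0 recommended for strong privacy (per the docstring)
        clipping_norm=1.0,       # fixed clip norm applied by fixedclipping_mod
        num_sampled_clients=20,
    )

    app = ClientApp(client_fn=client_fn, mods=[fixedclipping_mod])  # client_fn: placeholder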
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." -msgstr "현재 메시지의 식별자입니다." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." -msgstr "이 메시지를 보내는 노드의 식별자입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." -msgstr "이 메시지를 수신하는 노드의 식별자입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." -msgstr "이 메시지가 회신하는 메시지의 식별자입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.message.Metadata:13 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"An identifier for grouping messages. In some settings, this is used as the " -"FL round." -msgstr "메시지를 그룹화하기 위한 식별자입니다. 일부 설정에서는 FL 라운드로 " -"사용됩니다." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." -msgstr "이 메시지의 유효 시간(초)입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." -msgstr "수신 측에서 실행할 작업을 인코딩하는 문자열입니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.message.Metadata:21 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"An identifier that can be used when loading a particular data partition for " -"a ClientApp. Making use of this identifier is more relevant when conducting " -"simulations." 
+":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"클라이언트 앱의 특정 데이터 파티션을 로드할 때 사용할 수 있는 식별자입니다. " -"시뮬레이션을 수행할 때 이 식별자를 사용하는 것이 더 적절합니다." -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`created_at `\\" -msgstr ":py:obj:`created_at `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." -msgstr "메시지가 생성된 때의 Unix timestamp입니다." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" -msgstr ":py:obj:`dst_node_id `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" -msgstr ":py:obj:`group_id `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." -msgstr "메시지를 그룹화하기 위한 식별자입니다." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" -msgstr ":py:obj:`message_id `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" -msgstr ":py:obj:`message_type `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" -msgstr ":py:obj:`partition_id `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." -msgstr "클라이언트앱이 사용해야 하는 데이터 파티션을 알려주는 식별자입니다." 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" -msgstr ":py:obj:`reply_to_message `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" -msgstr ":py:obj:`run_id `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" -msgstr ":py:obj:`src_node_id `\\" +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" -msgstr ":py:obj:`ttl `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of -msgid "Time-to-live for this message." -msgstr "이 메시지를 기다리는 시간입니다." +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" -msgstr "MetricsRecord" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" -"`str`, :py:class:`int` | :py:class:`float` | :py:class:`~typing.List`\\ [:py:" -"class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`]]" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:class:`int` | :py:class:`float` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`]]" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" -msgstr "NDArray" +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: 
flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" -msgstr ":py:obj:`tensors `\\" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" -msgstr ":py:obj:`tensor_type `\\" +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 -msgid "ParametersRecord" -msgstr "ParametersRecord" +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" -"`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:class:`~flwr.common.record.parametersrecord.Array`]" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A dataclass storing named Arrays in order. This means that it holds entries " -"as an OrderedDict[str, Array]. ParametersRecord objects can be viewed as an " -"equivalent to PyTorch's state_dict, but holding serialised tensors instead." +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"Arrays라는 이름의 데이터 클래스를 순서대로 저장합니다. 즉, OrderedDict[str, " -"Array]로 항목을 보유합니다. ParametersRecord 객체는 파이토치의 state_dict와 " -"동등한 것으로 볼 수 있지만, 대신 직렬화된 텐서를 보유합니다." 
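Editorial aside (not part of the diff): the removed `ParametersRecord` docstring above describes an ordered mapping of names to serialised `Array` objects, analogous to a PyTorch `state_dict`. A minimal sketch of building one, assuming `array_from_numpy` is available from `flwr.common` in this version:

    # Illustrative only: a ParametersRecord holding serialised arrays by name.
    import numpy as np
    from flwr.common import ParametersRecord, array_from_numpy

    record = ParametersRecord()
    record["conv1.weight"] = array_from_numpy(np.random.randn(8, 3, 3, 3).astype(np.float32))
    record["conv1.bias"] = array_from_numpy(np.zeros(8, dtype=np.float32))

    # count_bytes() reports the serialised size; per its docstring, a small
    # amount of per-array metadata needed for deserialisation is included.
    total_bytes = record.count_bytes()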
- -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`count_bytes `\\ \\(\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`count_bytes `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*" -"F\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Note that a small amount of Bytes might also be included in this counting " -"that correspond to metadata of the serialized object (e.g. of NumPy array) " -"needed for deseralization." -msgstr "역직렬화에 필요한 직렬화된 객체의 메타데이터(예: NumPy 배열)에 해당하는 " -"소량의 바이트도 이 카운팅에 포함될 수 있습니다." - -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -msgid "ReconnectIns" -msgstr "ReconnectIns" - -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" -msgstr ":py:obj:`seconds `\\" - -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" -msgstr "RecordSet" - -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" -msgstr ":py:obj:`configs_records `\\" - -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." -msgstr "Dictionary holding ConfigsRecord instances." - -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" -msgstr ":py:obj:`metrics_records `\\" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." -msgstr "Dictionary holding MetricsRecord instances." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`parameters_records `\\" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`parameters_records `\\" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." 
-msgstr "Dictionary holding ParametersRecord instances." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 -msgid "ServerMessage" -msgstr "ServerMessage" +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" +msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" -msgstr ":py:obj:`evaluate_ins `\\" +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" -msgstr ":py:obj:`fit_ins `\\" +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." +msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`get_parameters_ins `\\" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -":py:obj:`get_parameters_ins `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`get_properties_ins `\\" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`get_properties_ins `\\" -#: ../../source/ref-api/flwr.common.Status.rst:2 -msgid "Status" -msgstr "Status" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" -msgstr ":py:obj:`message `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" -msgstr "array\\_from\\_numpy" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" -msgstr "bytes\\_to\\_ndarray" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -msgid "configure" -msgstr "구성" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" -msgstr "이벤트" +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" +msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" -msgstr "로그" +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on 
https://arxiv.org/abs/1602.05629" +msgstr "" -#: logging.Logger.log:3 of +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"To pass exception information, use the keyword argument exc_info with a true " -"value, e.g." -msgstr "예외 정보를 전달하려면 키워드 argument exc_info를 참 값과 함께 사용합니다." +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." +msgstr "" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of -msgid "Run Flower server app." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of -msgid "Run Flower SuperLink (Driver API and Fleet API)." 
+#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`Driver `\\ \\(\\)" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of -msgid "Flower ServerApp." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\, " -"round\\_timeout\\]\\)" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of -msgid "Flower server config." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`SimpleClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.workflow `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of -msgid "Workflows." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -msgid "ClientManager" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_available `\\ \\(\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`sample `\\ \\(num\\_clients\\[\\, " -"min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`unregister `\\ \\(client\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any reason." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -msgid "Driver" +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`create_message `\\ \\(content\\, " -"message\\_type\\, ...\\[\\, ttl\\]\\)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`push_messages `\\ \\(messages\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"This method constructs a new `Message` with given content and metadata. The " -"`run_id` and `src_node_id` will be set automatically." +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"The content for the new message. This holds records that are to be sent to " -"the destination node." +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"The type of the message, defining the action to be executed on the receiving " -"end." +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. In some settings, " -"this is used as the FL round." +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"Time-to-live for the round trip of this message, i.e., the time from sending " -"this message to receiving a reply. It specifies in seconds the duration for " -"which the message and its potential reply are considered valid. If unset, " -"the default TTL (i.e., `common.DEFAULT_TTL`) will be used." +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This method is used to collect messages from the SuperLink that correspond " -"to a set of given message IDs." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An iterable of message IDs for which reply messages are to be retrieved." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This method takes an iterable of messages and sends each message to the node " -"specified in `dst_node_id`." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, which " -"can be used to pull replies." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This method sends a list of messages to their destination node IDs and then " -"waits for the replies. It continues to pull replies until either all replies " -"are received or the specified timeout duration is exceeded." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the method " -"will wait until replies for all messages are received." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "" -"**replies** -- An iterable of reply messages received from the SuperLink." +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages` to " -"collect the replies. If `timeout` is set, the method may not return replies " -"for all sent messages. A message remains valid until its TTL, which is not " -"affected by `timeout`." +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized `\\ " -"\\(server\\_round\\, loss\\)" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`add_loss_distributed `\\ " -"\\(server\\_round\\, loss\\)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`add_metrics_centralized `\\ \\(server\\_round\\, metrics\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`add_metrics_distributed `\\ \\(server\\_round\\, metrics\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`add_metrics_distributed_fit `\\ \\(server\\_round\\, ...\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`config `\\" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." 
msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`strategy `\\" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`history `\\" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`state `\\" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`disconnect_all_clients `\\ \\(timeout\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." 
+#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\, " -"timeout\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -":py:obj:`set_strategy `\\ \\(strategy\\)" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of -msgid "Replace server strategy." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: flwr.server.server_app.ServerApp:5 of -msgid "Use the `ServerApp` with an existing `Strategy`:" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid ":py:obj:`main `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -msgid "ServerConfig" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.server_config.ServerConfig:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"All attributes have default values which allows users to configure just the " -"ones they care about." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`num_available `\\ " -"\\(\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`register `\\ \\(client\\)" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Blocks until the requested number of clients is available or until a timeout " -"is reached. Current timeout default: 1 day." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" +#: flwr.server.strategy.krum.Krum:17 of +msgid "" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
msgstr "" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." msgstr "" -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A server implementation, either `flwr.server.Server` or a subclass thereof. " -"If no instance is provided, then `start_server` will create one." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.strategy." -"Strategy`. If no strategy is provided, then `start_server` will use `flwr." -"server.strategy.FedAvg`." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`. If " -"no implementation is provided, then `start_server` will use `flwr.server." -"client_manager.SimpleClientManager`." +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.app.start_server:21 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower " -"clients. The default should be sufficient for most models. Users who train " -"very large models might need to increase this value. Note that the Flower " -"clients need to be started with the same value (see `flwr.client." -"start_client`), otherwise clients will not know about the increased limit " -"and block larger messages." +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Tuple containing root certificate, server certificate, and private key to " -"start a secure SSL-enabled server. 
The tuple is expected to have three bytes " -"elements in the following order: * CA certificate. * server " -"certificate. * server private key." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Tuple containing root certificate, server certificate, and private key to " -"start a secure SSL-enabled server. The tuple is expected to have three bytes " -"elements in the following order:" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:33 of -msgid "server certificate." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.app.start_server:34 of -msgid "server private key." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, fraction\\_fit\\, " -"fraction\\_evaluate\\, ...\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping `\\ \\(...\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping `\\ \\(...\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping `\\ \\(...\\)" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, EvaluateRes)` constitutes a successful update from " +"one of the previously selected clients. Note that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates."
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping `\\ \\(...\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Note that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will not update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy."
+#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`FedAvgAndroid `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." +msgstr "" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -msgid "Federated Optim strategy." +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." 
+#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -":py:obj:`FedTrimmedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -":py:obj:`FaultTolerantFedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of +msgid "" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, fraction\\_fit\\, " -"fraction\\_evaluate\\, ...\\]\\)" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, q\\_param\\, " -"qffl\\_learning\\_rate\\, ...\\]\\)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of +msgid "" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid ":py:obj:`Strategy `\\ \\(\\)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of +msgid "" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of +msgid "" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +msgid "" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. 
" +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of +msgid "" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of +msgid "" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of +msgid "" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of +msgid "" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:27 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"Byzantine resilient aggregation rule that is used as the first step of the " -"Bulyan (e.g., Krum)" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." 
+#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." 
+#: flwr.simulation.run_simulation.run_simulation:3 of +msgid "" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." +#: flwr.simulation.run_simulation.run_simulation:9 of +msgid "" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." +msgstr "" + +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: flwr.simulation.run_simulation.run_simulation:21 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." 
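For the `run_simulation` parameters documented above (`server_app`, `client_app`, `num_supernodes`, `backend_config`), here is a small self-contained sketch; the `ConstantClient` placeholder, the single FedAvg round, and the resource numbers are assumptions for illustration only, not taken from this diff:

```python
# Hedged sketch of a minimal simulation using the documented parameters.
import numpy as np
from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg
from flwr.simulation import run_simulation


class ConstantClient(NumPyClient):
    """Placeholder client: one parameter array, returned unchanged by fit()."""

    def get_parameters(self, config):
        return [np.zeros(3)]

    def fit(self, parameters, config):
        return parameters, 1, {}


def client_fn(context: Context):
    return ConstantClient().to_client()


def server_fn(context: Context):
    strategy = FedAvg(fraction_evaluate=0.0)  # skip federated evaluation
    return ServerAppComponents(strategy=strategy, config=ServerConfig(num_rounds=1))


client_app = ClientApp(client_fn=client_fn)
server_app = ServerApp(server_fn=server_fn)

if __name__ == "__main__":
    run_simulation(
        server_app=server_app,   # sends messages to the virtual SuperNodes
        client_app=client_app,   # executed by each virtual SuperNode
        num_supernodes=10,       # ten nodes run the ClientApp
        backend_config={         # top-level key documented above
            "client_resources": {"num_cpus": 1, "num_gpus": 0.0}
        },
    )
```

`enable_tf_gpu_growth` and `verbose_logging` keep their defaults here; the former only matters when a TensorFlow `ServerApp` shares a GPU with the `ClientApp`s, as described above.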
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: ../../source/ref-changelog.md:3 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." +#: ../../source/ref-changelog.md:9 +msgid "" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "선택적 개선 사항" + +#: ../../source/ref-changelog.md:13 msgid "" -":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: ../../source/ref-changelog.md:15 +msgid "" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:17 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." 
+#: ../../source/ref-changelog.md:19 +msgid "" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:21 msgid "" -":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:23 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +#: ../../source/ref-changelog.md:25 msgid "" -"Evaluate model parameters using an evaluation function from the strategy." +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:27 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." 
+#: ../../source/ref-changelog.md:29 +msgid "" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: ../../source/ref-changelog.md:35 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: ../../source/ref-changelog.md:41 msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this list, " -"it means that this `ClientProxy` will not participate in the next round of " -"federated evaluation." -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: ../../source/ref-changelog.md:45 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:47 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:49 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:53 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:55 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of -msgid "" -"Configure the next round of training incorporating Differential Privacy (DP)." +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:59 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:60 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: ../../source/ref-changelog.md:61 msgid "" -"Configuration of the next training round includes information related to DP, " -"such as clip norm and noise stddev." +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +#: ../../source/ref-changelog.md:63 msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list identifies " -"a `ClientProxy` and the `FitIns` for this particular `ClientProxy`. 
If a " -"particular `ClientProxy` is not included in this list, it means that this " -"`ClientProxy` will not participate in the next round of federated learning." +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: ../../source/ref-changelog.md:65 +msgid "" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: ../../source/ref-changelog.md:68 msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `adaptiveclipping_mod`." +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: ../../source/ref-changelog.md:69 +msgid "" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." +#: ../../source/ref-changelog.md:70 +msgid "" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." 
+#: ../../source/ref-changelog.md:72 +msgid "" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: ../../source/ref-changelog.md:74 msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of +#: ../../source/ref-changelog.md:76 msgid "" -"The desired quantile of updates which should be clipped. Defaults to 0.5." +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: ../../source/ref-changelog.md:78 msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. Andrew " -"et al. recommends to set to 0.2." +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of +#: ../../source/ref-changelog.md:80 msgid "" -"The stddev of the noise added to the count of updates currently below the " -"estimate. Andrew et al. recommends to set to `expected_num_records/20`" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -msgid "Create a strategy:" +#: ../../source/ref-changelog.md:82 +msgid "" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of +#: ../../source/ref-changelog.md:84 msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideAdaptiveClipping` " -"wrapper:" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" +msgstr "" + +#: ../../source/ref-changelog.md:86 +msgid "" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: ../../source/ref-changelog.md:88 +msgid "" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:90 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." +msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " 
+"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:102 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -msgid "Aggregate training results and update clip norms." +#: ../../source/ref-changelog.md:104 +msgid "" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:113 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:115 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:117 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
+msgstr "" + +#: ../../source/ref-changelog.md:121 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -msgid "DifferentialPrivacyClientSideFixedClipping" +#: ../../source/ref-changelog.md:123 +msgid "" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: ../../source/ref-changelog.md:135 msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen on " -"the client-side, usually by using the built-in `fixedclipping_mod`." +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of +#: ../../source/ref-changelog.md:137 msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A value " -"of 1.0 or higher is recommended for strong privacy." +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: ../../source/ref-changelog.md:139 +msgid "v1.10.0 (2024-07-24)" +msgstr "" + +#: ../../source/ref-changelog.md:145 msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: ../../source/ref-changelog.md:149 +msgid "" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:151 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:153 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of -msgid "Add noise to the aggregated parameters." +#: ../../source/ref-changelog.md:155 +msgid "" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. 
" +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:157 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:159 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:161 msgid "" -":py:obj:`evaluate `\\ \\(server\\_round\\, " -"parameters\\)" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:163 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: ../../source/ref-changelog.md:165 +msgid "" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: ../../source/ref-changelog.md:167 msgid "" -"The standard deviation of the noise added to the count of updates below the " -"estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: ../../source/ref-changelog.md:169 msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:171 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:173 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:175 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:177 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." 
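Tying together the run config, `client_fn(context)` and node config entries quoted above, here is a small sketch of how a `ClientApp` might read both kinds of configuration from `Context`. The key names `learning-rate` and `partition-id` mirror the examples given in the changelog text; `SimpleClient` is an illustrative placeholder, not a Flower class:

```python
from flwr.client import ClientApp, NumPyClient
from flwr.common import Context

# pyproject.toml (excerpt, values illustrative):
#   [tool.flwr.app.config]
#   learning-rate = 0.01
# Override at runtime:        flwr run --run-config learning-rate=0.02
# Static per-node config:     flower-supernode --node-config "partition-id=0"


class SimpleClient(NumPyClient):  # illustrative placeholder client
    def __init__(self, lr: float, partition_id: int) -> None:
        super().__init__()
        self.lr = lr
        self.partition_id = partition_id


def client_fn(context: Context):
    lr = context.run_config["learning-rate"]            # from pyproject.toml / --run-config
    partition_id = context.node_config["partition-id"]  # set per SuperNode via --node-config
    return SimpleClient(lr, partition_id).to_client()


app = ClientApp(client_fn=client_fn)
```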
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:179 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:181 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 -msgid "DifferentialPrivacyServerSideFixedClipping" +#: ../../source/ref-changelog.md:183 +msgid "" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: ../../source/ref-changelog.md:185 msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:187 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:189 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." +#: ../../source/ref-changelog.md:191 +msgid "" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:193 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:195 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:197 msgid "" -":py:obj:`evaluate `\\ \\(server\\_round\\, " -"parameters\\)" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:199 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: ../../source/ref-changelog.md:201 +msgid "" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: ../../source/ref-changelog.md:203 +msgid "" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "선택적 개선 사항" + +#: ../../source/ref-changelog.md:209 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:211 msgid "" -":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." 
+#: ../../source/ref-changelog.md:213 +msgid "" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:215 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" +msgstr "" + +#: ../../source/ref-changelog.md:221 msgid "" -":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:223 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:225 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:229 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:231 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 -msgid "FedAdagrad" +#: ../../source/ref-changelog.md:233 +msgid "" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: ../../source/ref-changelog.md:235 +msgid "" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: ../../source/ref-changelog.md:237 +msgid "v1.9.0 (2024-06-10)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: ../../source/ref-changelog.md:243 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." 
+#: ../../source/ref-changelog.md:247 +msgid "" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: ../../source/ref-changelog.md:249 +msgid "" +"Flower 1.9 introduces the first built-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: ../../source/ref-changelog.md:251 +msgid "" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:253 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:255 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. 
Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get started." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:257 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:259 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:261 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:263 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:265 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:267 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: ../../source/ref-changelog.md:269 +msgid "" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: ../../source/ref-changelog.md:271 +msgid "" +"The `flwr` CLI's `flwr new` command is starting to become everyone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: ../../source/ref-changelog.md:273 +msgid "" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:275 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:277 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:279 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:281 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:283 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:285 +msgid "" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" +msgstr "" + +#: ../../source/ref-changelog.md:287 +msgid "" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." +msgstr "" + +#: ../../source/ref-changelog.md:289 +msgid "" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" +msgstr "" + +#: ../../source/ref-changelog.md:291 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:293 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:295 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: ../../source/ref-changelog.md:297 +msgid "" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-changelog.md:299 +msgid "" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of +#: ../../source/ref-changelog.md:301 msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will still " -"be sampled. Defaults to 1.0." +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of +#: ../../source/ref-changelog.md:303 msgid "" -"Fraction of clients used during validation. In case `min_evaluate_clients` " -"is larger than `fraction_evaluate * available_clients`, " -"`min_evaluate_clients` will still be sampled. Defaults to 1.0." 
+"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-changelog.md:307 +msgid "" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" +msgstr "" + +#: ../../source/ref-changelog.md:311 +msgid "**Deprecate Python 3.8 support**" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of 
+#: ../../source/ref-changelog.md:313 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:315 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:317 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:319 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:321 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:325 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:327 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +#: ../../source/ref-changelog.md:329 +msgid "" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:331 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:333 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:335 msgid "" -":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +#: ../../source/ref-changelog.md:337 +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:343 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " +msgstr "" + +#: ../../source/ref-changelog.md:347 +msgid "" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" +msgstr "" + +#: ../../source/ref-changelog.md:349 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." 
+" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:351 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:353 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:355 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:357 msgid "" -":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." 
+#: ../../source/ref-changelog.md:359 +msgid "" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:361 msgid "" -":py:obj:`ndarrays_to_parameters `\\ \\(ndarrays\\)" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:363 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:365 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:367 msgid "" -":py:obj:`parameters_to_ndarrays `\\ \\(parameters\\)" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of -msgid "Convert parameters object to NumPy weights." +#: ../../source/ref-changelog.md:369 +msgid "" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. 
The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we include support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +#: ../../source/ref-changelog.md:371 +msgid "" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: ../../source/ref-changelog.md:373 +msgid "" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: ../../source/ref-changelog.md:375 msgid "" -"Server-side learning rate used in server-side optimization. Defaults to 1.0." +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: ../../source/ref-changelog.md:377 +msgid "" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:379 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:381 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:383 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:385 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:387 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:389 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +msgstr "" + +#: ../../source/ref-changelog.md:391 +msgid "" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" +msgstr "" + +#: ../../source/ref-changelog.md:393 +msgid "" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. 
`quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." +msgstr "" + +#: ../../source/ref-changelog.md:395 +msgid "" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " 
+"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" +msgstr "" + +#: ../../source/ref-changelog.md:401 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:407 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:411 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: ../../source/ref-changelog.md:413 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:415 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:417 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." +#: ../../source/ref-changelog.md:419 +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:421 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:423 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:425 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:427 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:429 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:431 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: ../../source/ref-changelog.md:433 +msgid "" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: ../../source/ref-changelog.md:435 +msgid "" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." 
+#: ../../source/ref-changelog.md:437 +msgid "" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:439 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:441 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:443 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:445 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:447 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:449 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: ../../source/ref-changelog.md:455 +msgid "" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: ../../source/ref-changelog.md:456 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client needs " -"to be adjusted. A proximal term needs to be added to the loss function " -"during the training:" +#: ../../source/ref-changelog.md:457 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of -msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +#: ../../source/ref-changelog.md:458 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of -msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +#: ../../source/ref-changelog.md:459 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: ../../source/ref-changelog.md:460 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: ../../source/ref-changelog.md:462 +msgid "" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of +#: ../../source/ref-changelog.md:464 msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." 
+"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of +#: ../../source/ref-changelog.md:466 msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to be " -"closer to the server parameters during training)." +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:468 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:470 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" +msgstr "" + +#: ../../source/ref-changelog.md:474 +msgid "" 
+"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:476 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:478 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:480 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:482 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:484 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:486 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: ../../source/ref-changelog.md:488 +msgid "" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: ../../source/ref-changelog.md:490 +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: ../../source/ref-changelog.md:492 +msgid "" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." 
msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: ../../source/ref-changelog.md:494 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:500 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:504 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." +#: ../../source/ref-changelog.md:506 +msgid "" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:508 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:510 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:512 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:514 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:516 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:518 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:522 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:526 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: ../../source/ref-changelog.md:528 +msgid "" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:530 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. 
In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:532 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:534 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:536 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:538 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:540 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:548 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:550 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:552 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:554 msgid 
"" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:570 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:572 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:574 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " 
+"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:576 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:578 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:580 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:586 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" +#: ../../source/ref-changelog.md:588 +msgid "" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: ../../source/ref-changelog.md:590 +msgid "" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." +#: ../../source/ref-changelog.md:592 +msgid "" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:600 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:604 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:606 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:608 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:610 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:612 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:614 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:616 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: ../../source/ref-changelog.md:618 +msgid "" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-changelog.md:620 +msgid "" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -#: flwr.server.strategy.krum.Krum:17 of +#: ../../source/ref-changelog.md:622 msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in " -"that case classical Krum is applied." +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:624 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" +msgstr "" + +#: ../../source/ref-changelog.md:628 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: ../../source/ref-changelog.md:630 +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:632 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:634 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:636 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:638 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:640 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:642 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: ../../source/ref-changelog.md:644 +msgid "" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:646 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:648 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:650 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:652 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." +msgstr "" + +#: ../../source/ref-changelog.md:654 +msgid "" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:656 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:658 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:660 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:662 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:666 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." +#: ../../source/ref-changelog.md:668 +msgid "" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. 
A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:670 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:684 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:688 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:690 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." +#: ../../source/ref-changelog.md:692 +msgid "" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:694 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. 
We now have a Swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will greatly facilitate the app creation process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: ../../source/ref-changelog.md:696 +msgid "" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: ../../source/ref-changelog.md:698 msgid "" -"Successful updates from the previously selected and configured clients. Each " -"pair of `(ClientProxy, FitRes` constitutes a successful update from one of " -"the previously selected clients. Not that not all previously selected " -"clients are necessarily included in this list: a client might drop out and " -"not submit a result. For each client that did not submit an update, there " -"should be an `Exception` in `failures`." +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Federated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +#: ../../source/ref-changelog.md:700 msgid "" -"Exceptions that occurred while the server was waiting for client updates." +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: ../../source/ref-changelog.md:702 msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"Successful updates from the previously selected and configured clients. Each " -"pair of `(ClientProxy, FitRes)` constitutes a successful update from one of " -"the previously selected clients. Not that not all previously selected " -"clients are necessarily included in this list: a client might drop out and " -"not submit a result. For each client that did not submit an update, there " -"should be an `Exception` in `failures`."
+"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: ../../source/ref-changelog.md:706 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the previous " -"parameters with the ones returned from this method). If `None` is returned " -"(e.g., because there were only failures and no viable results) then the " -"server will no update the previous model parameters, the updates received in " -"this round are discarded, and the global model parameters remain the same." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: ../../source/ref-changelog.md:708 msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: ../../source/ref-changelog.md:710 msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple containing " -"loss and a dictionary containing task-specific metrics (e.g., accuracy)." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-changelog.md:712 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 -msgid "workflow" +#: ../../source/ref-changelog.md:714 +msgid "" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:716 msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). 
Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: ../../source/ref-changelog.md:718 +msgid "" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:720 msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "The workflow for the SecAgg+ protocol." +#: ../../source/ref-changelog.md:722 +msgid "" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:724 msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." +#: ../../source/ref-changelog.md:726 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -msgid "DefaultWorkflow" +#: ../../source/ref-changelog.md:728 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -msgid "SecAggPlusWorkflow" +#: ../../source/ref-changelog.md:730 +msgid "" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: ../../source/ref-changelog.md:732 msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors owned " -"by multiple parties, without accessing any individual integer vector. This " -"workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and a " -"weighted version of the locally updated parameters, both of which are masked " -"for privacy. Specifically, each client uploads \"[w, w * params]\" with " -"masks, where weighting factor 'w' is the number of examples ('num_examples') " -"and 'params' represents the model parameters ('parameters') from the " -"client's `FitRes`. The server then aggregates these contributions to compute " -"the weighted average of model parameters." 
+"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: ../../source/ref-changelog.md:734 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." 
+#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: ../../source/ref-changelog.md:748 msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: ../../source/ref-changelog.md:752 msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of +#: ../../source/ref-changelog.md:754 msgid "" -"Only the aggregated model parameters are exposed and passed to `Strategy." -"aggregate_fit`, ensuring individual data privacy." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of +#: ../../source/ref-changelog.md:756 msgid "" -"The number of shares into which each client's private key is split under the " -"SecAgg+ protocol. If specified as a float, it represents the proportion of " -"all selected clients, and the number of shares will be set dynamically in " -"the run time. A private key can be reconstructed from these shares, allowing " -"for the secure aggregation of model updates. Each client sends one share to " -"each of its neighbors while retaining one." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of +#: ../../source/ref-changelog.md:758 msgid "" -"The minimum number of shares required to reconstruct a client's private key, " -"or, if specified as a float, it represents the proportion of the total " -"number of shares needed for reconstruction. This threshold ensures privacy " -"by allowing for the recovery of contributions from dropped clients during " -"aggregation, without compromising individual client data." 
+"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of +#: ../../source/ref-changelog.md:760 msgid "" -"The maximum value of the weight that can be assigned to any single client's " -"update during the weighted average calculation on the server side, e.g., in " -"the FedAvg algorithm." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "" + +#: ../../source/ref-changelog.md:764 msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within [-" -"clipping_range, clipping_range], facilitating quantization." +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of +#: ../../source/ref-changelog.md:766 msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of +#: ../../source/ref-changelog.md:768 msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. Please " -"use 2**n values for `modulus_range` to prevent overflow issues." +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of +#: ../../source/ref-changelog.md:770 msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for " -"replies for this duration each time. If `None`, there is no time limit and " -"the workflow will wait until replies for all messages are received." +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of +#: ../../source/ref-changelog.md:772 msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` means " -"better privacy guarantees but less tolerance to dropouts." +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of +#: ../../source/ref-changelog.md:774 msgid "" -"Too large `max_weight` may compromise the precision of the quantization." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +#: ../../source/ref-changelog.md:776 +msgid "" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of +#: ../../source/ref-changelog.md:778 msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in the " -"runtime. This allows for dynamic adjustment based on the total number of " -"participating clients." +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of +#: ../../source/ref-changelog.md:780 msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted as " -"the proportion of the number of shares needed for the reconstruction of a " -"private key. This feature enables flexibility in setting the security " -"threshold relative to the number of distributed shares." +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of +#: ../../source/ref-changelog.md:782 msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+ " -"protocol." 
+"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:784 msgid "" -":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: ../../source/ref-changelog.md:786 +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:788 msgid "" -":py:obj:`setup_stage `\\ \\(driver\\, context\\, state\\)" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -msgid "Execute the 'setup' stage." 
+#: ../../source/ref-changelog.md:792 +msgid "" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -msgid "Execute the 'share keys' stage." +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:806 msgid "" -":py:obj:`unmask_stage `\\ \\(driver\\, context\\, state\\)" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -msgid "Execute the 'unmask' stage." +#: ../../source/ref-changelog.md:810 +msgid "" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -msgid "SecAggWorkflow" +#: ../../source/ref-changelog.md:812 +msgid "" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-changelog.md:814 msgid "" -"Bases: :py:class:`~flwr.server.workflow.secure_aggregation." -"secaggplus_workflow.SecAggPlusWorkflow`" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#: ../../source/ref-changelog.md:816 msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned by " -"multiple parties, without accessing any individual integer vector. 
This " -"workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and a " -"weighted version of the locally updated parameters, both of which are masked " -"for privacy. Specifically, each client uploads \"[w, w * params]\" with " -"masks, where weighting factor 'w' is the number of examples ('num_examples') " -"and 'params' represents the model parameters ('parameters') from the " -"client's `FitRes`. The server then aggregates these contributions to compute " -"the weighted average of model parameters." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: ../../source/ref-changelog.md:818 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg configuration " -"to clients and collect their public keys. - 'share keys': Broadcast public " -"keys among clients and collect encrypted secret" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +#: ../../source/ref-changelog.md:820 msgid "" -"Each client's private key is split into N shares under the SecAgg protocol, " -"where N is the number of selected clients." +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: ../../source/ref-changelog.md:822 msgid "" -"Generally, higher `reconstruction_threshold` means better privacy guarantees " -"but less tolerance to dropouts." +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +#: ../../source/ref-changelog.md:823 msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in setting " -"the security threshold relative to the number of selected clients." +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: ../../source/ref-changelog.md:824 msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." 
+"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:825 +msgid "" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:827 msgid "" -":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:829 msgid "" -":py:obj:`setup_stage `\\ " -"\\(driver\\, context\\, state\\)" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:831 msgid "" -":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:833 msgid "" -":py:obj:`unmask_stage `\\ " -"\\(driver\\, context\\, state\\)" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" +#: ../../source/ref-changelog.md:835 +msgid "" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-changelog.md:837 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\, " -"client\\_fn\\[\\, ...\\]\\)" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." 
msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: ../../source/ref-changelog.md:839 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-changelog.md:841 msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." +#: ../../source/ref-changelog.md:843 +msgid "" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -msgid "run\\_simulation" +#: ../../source/ref-changelog.md:845 +msgid "" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:3 of +#: ../../source/ref-changelog.md:847 msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of +#: ../../source/ref-changelog.md:849 msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive " -"messages sent by the `ServerApp`." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: ../../source/ref-changelog.md:851 msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in the " -"ServerApp and receive a Message describing what the ClientApp should perform." +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +#: ../../source/ref-changelog.md:853 +msgid "" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: ../../source/ref-changelog.md:855 msgid "" -"'A dictionary, e.g {\"\": , \"\": } to configure a " -"backend. Values supported in are those included by `flwr.common." -"typing.ConfigsRecordValues`." 
+"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: ../../source/ref-changelog.md:859 msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. This " -"is desirable if you make use of a TensorFlow model on your `ServerApp` while " -"having your `ClientApp` running on the same GPU. Without enabling this, you " -"might encounter an out-of-memory error because TensorFlow, by default, " -"allocates all GPU memory. Read more about how `tf.config.experimental." -"set_memory_growth()` works in the TensorFlow documentation: https://www." -"tensorflow.org/api/stable." +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: ../../source/ref-changelog.md:863 msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If " -"enabled, DEBUG-level logs will be displayed." +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: ../../source/ref-changelog.md:873 msgid "" -"A function creating client instances. The function must take a single `str` " -"argument called `cid`. It should return a single client instance of type " -"Client. Note that the created client instances are ephemeral and will often " -"be destroyed after a single method invocation. Since client instances are " -"not long-lived, they should not attempt to carry state over method " -"invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load evaluation " -"data in the `evaluate` method itself)." +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: ../../source/ref-changelog.md:875 msgid "" -"The total number of clients in this simulation. 
This must be set if " -"`clients_ids` is not set and vice-versa." +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: ../../source/ref-changelog.md:879 msgid "" -"List `client_id`s for each client. This is only required if `num_clients` is " -"not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: ../../source/ref-changelog.md:881 msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` and " -"`num_gpus`. To understand the GPU utilization caused by `num_gpus`, as well " -"as using custom resources, please consult the Ray documentation." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: ../../source/ref-changelog.md:883 msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: ../../source/ref-changelog.md:885 msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If no " -"strategy is provided, then `start_server` will use `flwr.server.strategy." -"FedAvg`." +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: ../../source/ref-changelog.md:887 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`. If " -"no implementation is provided, then `start_simulation` will use `flwr.server." -"client_manager.SimpleClientManager`." +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-changelog.md:889 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to ray.init." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-changelog.md:891 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "" - -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: flwr.simulation.app.start_simulation:45 of +#: ../../source/ref-changelog.md:893 msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any arguments " -"from being passed to ray.init." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: ../../source/ref-changelog.md:895 msgid "" -"Set to True to prevent `ray.shutdown()` in case `ray.is_initialized()=True`." +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: ../../source/ref-changelog.md:897 msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: ../../source/ref-changelog.md:899 msgid "" -"If you want to create your own Actor classes, you might need to pass some " -"input argument. You can use this dictionary for such purpose." +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: ../../source/ref-changelog.md:901 msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for the " -"VCE to choose in which node the actor is placed. If you are an advanced user " -"needed more control you can use lower-level scheduling strategies to pin " -"actors to specific compute nodes (e.g. via NodeAffinitySchedulingStrategy). " -"Please note this is an advanced feature. For all details, please refer to " -"the Ray documentation: https://docs.ray.io/en/latest/ray-core/scheduling/" -"index.html" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: ../../source/ref-changelog.md:903 +msgid "" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: ../../source/ref-changelog.md:905 +msgid "" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "v1.9.0 (2024-06-10)" +#: ../../source/ref-changelog.md:907 +msgid "" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" +#: ../../source/ref-changelog.md:909 +msgid "" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 +#: ../../source/ref-changelog.md:911 msgid "" -"We would like to give our special thanks to all the contributors who made " -"the new version of Flower possible (in `git shortlog` order):" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/ref-changelog.md:9 +#: ../../source/ref-changelog.md:913 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, " -"`Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`, `Robert " -"Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " -msgstr "" - -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." 
msgstr "" -#: ../../source/ref-changelog.md:13 +#: ../../source/ref-changelog.md:915 msgid "" -"**Introduce built-in authentication (preview)** ([#2946](https://github.com/" -"adap/flower/pull/2946), [#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), [#2917](https://github." -"com/adap/flower/pull/2917), [#3386](https://github.com/adap/flower/" -"pull/3386), [#3308](https://github.com/adap/flower/pull/3308), [#3001]" -"(https://github.com/adap/flower/pull/3001), [#3409](https://github.com/adap/" -"flower/pull/3409), [#2999](https://github.com/adap/flower/pull/2999), [#2979]" -"(https://github.com/adap/flower/pull/2979), [#3389](https://github.com/adap/" -"flower/pull/3389), [#3503](https://github.com/adap/flower/pull/3503), [#3366]" -"(https://github.com/adap/flower/pull/3366), [#3357](https://github.com/adap/" -"flower/pull/3357))" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/ref-changelog.md:15 +#: ../../source/ref-changelog.md:917 msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to connect " -"Flower to external authentication systems. With this release, the SuperLink " -"can authenticate SuperNodes using a built-in authentication system. A new " -"[how-to guide](https://flower.ai/docs/framework/how-to-authenticate-" -"supernodes.html) and a new [code example](https://github.com/adap/flower/" -"tree/main/examples/flower-authentication) help you to get started." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:919 msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/ref-changelog.md:19 +#: ../../source/ref-changelog.md:921 msgid "" -"**Introduce end-to-end Docker support** ([#3483](https://github.com/adap/" -"flower/pull/3483), [#3266](https://github.com/adap/flower/pull/3266), [#3390]" -"(https://github.com/adap/flower/pull/3390), [#3283](https://github.com/adap/" -"flower/pull/3283), [#3285](https://github.com/adap/flower/pull/3285), [#3391]" -"(https://github.com/adap/flower/pull/3391), [#3403](https://github.com/adap/" -"flower/pull/3403), [#3458](https://github.com/adap/flower/pull/3458), [#3533]" -"(https://github.com/adap/flower/pull/3533), [#3453](https://github.com/adap/" -"flower/pull/3453), [#3486](https://github.com/adap/flower/pull/3486), [#3290]" -"(https://github.com/adap/flower/pull/3290))" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -#: ../../source/ref-changelog.md:21 +#: ../../source/ref-changelog.md:923 msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower " -"SuperNode, and the Flower `ServerApp`. 
This set of images enables you to run " -"all Flower components in Docker. Check out the new [how-to guide](https://" -"flower.ai/docs/framework/how-to-run-flower-using-docker.html) to get stated." +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/ref-changelog.md:23 +#: ../../source/ref-changelog.md:925 msgid "" -"**Re-architect Flower Next simulation engine** ([#3307](https://github.com/" -"adap/flower/pull/3307), [#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), [#3273](https://github." -"com/adap/flower/pull/3273), [#3417](https://github.com/adap/flower/" -"pull/3417), [#3281](https://github.com/adap/flower/pull/3281), [#3343]" -"(https://github.com/adap/flower/pull/3343), [#3326](https://github.com/adap/" -"flower/pull/3326))" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -#: ../../source/ref-changelog.md:25 -msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves the " -"reliability of simulations, especially in notebook environments. This is a " -"significant step towards a complete overhaul of the Flower Next simulation " -"architecture." +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/ref-changelog.md:27 -msgid "" -"**Upgrade simulation engine** ([#3354](https://github.com/adap/flower/" -"pull/3354), [#3378](https://github.com/adap/flower/pull/3378), [#3262]" -"(https://github.com/adap/flower/pull/3262), [#3435](https://github.com/adap/" -"flower/pull/3435), [#3501](https://github.com/adap/flower/pull/3501), [#3482]" -"(https://github.com/adap/flower/pull/3482), [#3494](https://github.com/adap/" -"flower/pull/3494))" +#: ../../source/ref-changelog.md:933 +msgid "Highlights" msgstr "" -#: ../../source/ref-changelog.md:29 -msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to use " -"Ray 2.10." +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/ref-changelog.md:31 -msgid "" -"**Introduce FedPFT baseline** ([#3268](https://github.com/adap/flower/" -"pull/3268))" +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/ref-changelog.md:33 -msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication " -"costs while delivering high performing models. This is work led by Mahdi " -"Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated " -"Learning with Foundation Models\" ([arxiv](https://arxiv.org/abs/2402.01862))" +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/ref-changelog.md:35 +#: ../../source/ref-changelog.md:938 msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** ([#3291](https://github.com/adap/" -"flower/pull/3291), [#3139](https://github.com/adap/flower/pull/3139), [#3284]" -"(https://github.com/adap/flower/pull/3284), [#3251](https://github.com/adap/" -"flower/pull/3251), [#3376](https://github.com/adap/flower/pull/3376), [#3287]" -"(https://github.com/adap/flower/pull/3287))" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:942 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +msgstr "" + +#: ../../source/ref-changelog.md:944 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." +msgstr "" + +#: ../../source/ref-changelog.md:948 +msgid "" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "" + +#: ../../source/ref-changelog.md:950 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. 
Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" + +#: ../../source/ref-changelog.md:952 msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's favorite " -"way of creating new Flower projects. This release introduces additional " -"`flwr new` templates for Apple MLX, Hugging Face Transformers, scikit-learn " -"and TensorFlow. In addition to that, existing templates also received " -"updates." +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:954 msgid "" -"**Refine** `RecordSet` **API** ([#3209](https://github.com/adap/flower/" -"pull/3209), [#3331](https://github.com/adap/flower/pull/3331), [#3334]" -"(https://github.com/adap/flower/pull/3334), [#3335](https://github.com/adap/" -"flower/pull/3335), [#3375](https://github.com/adap/flower/pull/3375), [#3368]" -"(https://github.com/adap/flower/pull/3368))" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: ../../source/ref-changelog.md:41 +#: ../../source/ref-changelog.md:956 msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:958 msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), [#3461](https://github." -"com/adap/flower/pull/3461), [#3360](https://github.com/adap/flower/" -"pull/3360), [#3433](https://github.com/adap/flower/pull/3433))" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/ref-changelog.md:45 -msgid "" -"Logs received a substantial update. Not only are logs now much nicer to look " -"at, but they are also more configurable." +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:47 -msgid "" -"**Improve reliability** ([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), [#3566](https://github." 
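> Editor's note on the `ServerConfig` entry (ref-changelog.md:954) above: a small before/after sketch of the configuration change; the server address is a placeholder:

```python
import flwr as fl

# Before Flower 1.0 (no longer supported): a plain config dict
# fl.server.start_server(config={"num_rounds": 3, "round_timeout": 600.0})

# Flower 1.0+: a typed configuration object with the same fields
fl.server.start_server(
    server_address="0.0.0.0:8080",
    config=fl.server.ServerConfig(num_rounds=3, round_timeout=600.0),
)
```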
-"com/adap/flower/pull/3566), [#3462](https://github.com/adap/flower/" -"pull/3462), [#3225](https://github.com/adap/flower/pull/3225), [#3514]" -"(https://github.com/adap/flower/pull/3514), [#3535](https://github.com/adap/" -"flower/pull/3535), [#3372](https://github.com/adap/flower/pull/3372))" +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/ref-changelog.md:49 -msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/ref-changelog.md:51 +#: ../../source/ref-changelog.md:964 msgid "" -"**Update Swift and C++ SDKs** ([#3321](https://github.com/adap/flower/" -"pull/3321), [#2763](https://github.com/adap/flower/pull/2763))" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/ref-changelog.md:53 +#: ../../source/ref-changelog.md:966 msgid "" -"In the C++ SDK, communication-related code is now separate from main client " -"logic. A new abstract class `Communicator` has been introduced alongside a " -"gRPC implementation of it." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/ref-changelog.md:55 -msgid "" -"**Improve testing, tooling and CI/CD infrastructure** ([#3294](https://" -"github.com/adap/flower/pull/3294), [#3282](https://github.com/adap/flower/" -"pull/3282), [#3311](https://github.com/adap/flower/pull/3311), [#2878]" -"(https://github.com/adap/flower/pull/2878), [#3333](https://github.com/adap/" -"flower/pull/3333), [#3255](https://github.com/adap/flower/pull/3255), [#3349]" -"(https://github.com/adap/flower/pull/3349), [#3400](https://github.com/adap/" -"flower/pull/3400), [#3401](https://github.com/adap/flower/pull/3401), [#3399]" -"(https://github.com/adap/flower/pull/3399), [#3346](https://github.com/adap/" -"flower/pull/3346), [#3398](https://github.com/adap/flower/pull/3398), [#3397]" -"(https://github.com/adap/flower/pull/3397), [#3347](https://github.com/adap/" -"flower/pull/3347), [#3502](https://github.com/adap/flower/pull/3502), [#3387]" -"(https://github.com/adap/flower/pull/3387), [#3542](https://github.com/adap/" -"flower/pull/3542), [#3396](https://github.com/adap/flower/pull/3396), [#3496]" -"(https://github.com/adap/flower/pull/3496), [#3465](https://github.com/adap/" -"flower/pull/3465), [#3473](https://github.com/adap/flower/pull/3473), [#3484]" -"(https://github.com/adap/flower/pull/3484), [#3521](https://github.com/adap/" -"flower/pull/3521), [#3363](https://github.com/adap/flower/pull/3363), [#3497]" -"(https://github.com/adap/flower/pull/3497), [#3464](https://github.com/adap/" -"flower/pull/3464), [#3495](https://github.com/adap/flower/pull/3495), [#3478]" -"(https://github.com/adap/flower/pull/3478), [#3271](https://github.com/adap/" -"flower/pull/3271))" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/ref-changelog.md:57 +#: ../../source/ref-changelog.md:970 msgid "" -"As always, the Flower tooling, testing, and CI/CD 
infrastructure has " -"received many updates." +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:59 +#: ../../source/ref-changelog.md:972 msgid "" -"**Improve documentation** ([#3530](https://github.com/adap/flower/" -"pull/3530), [#3539](https://github.com/adap/flower/pull/3539), [#3425]" -"(https://github.com/adap/flower/pull/3425), [#3520](https://github.com/adap/" -"flower/pull/3520), [#3286](https://github.com/adap/flower/pull/3286), [#3516]" -"(https://github.com/adap/flower/pull/3516), [#3523](https://github.com/adap/" -"flower/pull/3523), [#3545](https://github.com/adap/flower/pull/3545), [#3498]" -"(https://github.com/adap/flower/pull/3498), [#3439](https://github.com/adap/" -"flower/pull/3439), [#3440](https://github.com/adap/flower/pull/3440), [#3382]" -"(https://github.com/adap/flower/pull/3382), [#3559](https://github.com/adap/" -"flower/pull/3559), [#3432](https://github.com/adap/flower/pull/3432), [#3278]" -"(https://github.com/adap/flower/pull/3278), [#3371](https://github.com/adap/" -"flower/pull/3371), [#3519](https://github.com/adap/flower/pull/3519), [#3267]" -"(https://github.com/adap/flower/pull/3267), [#3204](https://github.com/adap/" -"flower/pull/3204), [#3274](https://github.com/adap/flower/pull/3274))" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:974 msgid "" -"As always, the Flower documentation has received many updates. Notable new " -"pages include:" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:976 msgid "" -"[How-to upgrate to Flower Next (Flower Next migration guide)](https://flower." -"ai/docs/framework/how-to-upgrade-to-flower-next.html)" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:978 msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-run-" -"flower-using-docker.html)" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/ref-changelog.md:67 +#: ../../source/ref-changelog.md:980 msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-api/flwr.client." -"mod.html#module-flwr.client.mod)" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." 
msgstr "" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-changelog.md:982 msgid "" -"**General updates to Flower Examples** ([#3205](https://github.com/adap/" -"flower/pull/3205), [#3226](https://github.com/adap/flower/pull/3226), [#3211]" -"(https://github.com/adap/flower/pull/3211), [#3252](https://github.com/adap/" -"flower/pull/3252), [#3427](https://github.com/adap/flower/pull/3427), [#3410]" -"(https://github.com/adap/flower/pull/3410), [#3426](https://github.com/adap/" -"flower/pull/3426), [#3228](https://github.com/adap/flower/pull/3228), [#3342]" -"(https://github.com/adap/flower/pull/3342), [#3200](https://github.com/adap/" -"flower/pull/3200), [#3202](https://github.com/adap/flower/pull/3202), [#3394]" -"(https://github.com/adap/flower/pull/3394), [#3488](https://github.com/adap/" -"flower/pull/3488), [#3329](https://github.com/adap/flower/pull/3329), [#3526]" -"(https://github.com/adap/flower/pull/3526), [#3392](https://github.com/adap/" -"flower/pull/3392), [#3474](https://github.com/adap/flower/pull/3474), [#3269]" -"(https://github.com/adap/flower/pull/3269))" -msgstr "" - -#: ../../source/ref-changelog.md:71 -msgid "As always, Flower code examples have received many updates." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/ref-changelog.md:73 -msgid "" -"**General improvements** ([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), [#3565](https://github." -"com/adap/flower/pull/3565), [#3296](https://github.com/adap/flower/" -"pull/3296), [#3305](https://github.com/adap/flower/pull/3305), [#3246]" -"(https://github.com/adap/flower/pull/3246), [#3224](https://github.com/adap/" -"flower/pull/3224), [#3475](https://github.com/adap/flower/pull/3475), [#3297]" -"(https://github.com/adap/flower/pull/3297), [#3317](https://github.com/adap/" -"flower/pull/3317), [#3429](https://github.com/adap/flower/pull/3429), [#3196]" -"(https://github.com/adap/flower/pull/3196), [#3534](https://github.com/adap/" -"flower/pull/3534), [#3240](https://github.com/adap/flower/pull/3240), [#3365]" -"(https://github.com/adap/flower/pull/3365), [#3407](https://github.com/adap/" -"flower/pull/3407), [#3563](https://github.com/adap/flower/pull/3563), [#3344]" -"(https://github.com/adap/flower/pull/3344), [#3330](https://github.com/adap/" -"flower/pull/3330), [#3436](https://github.com/adap/flower/pull/3436), [#3300]" -"(https://github.com/adap/flower/pull/3300), [#3327](https://github.com/adap/" -"flower/pull/3327), [#3254](https://github.com/adap/flower/pull/3254), [#3253]" -"(https://github.com/adap/flower/pull/3253), [#3419](https://github.com/adap/" -"flower/pull/3419), [#3289](https://github.com/adap/flower/pull/3289), [#3208]" -"(https://github.com/adap/flower/pull/3208), [#3245](https://github.com/adap/" -"flower/pull/3245), [#3319](https://github.com/adap/flower/pull/3319), [#3203]" -"(https://github.com/adap/flower/pull/3203), [#3423](https://github.com/adap/" -"flower/pull/3423), [#3352](https://github.com/adap/flower/pull/3352), [#3292]" -"(https://github.com/adap/flower/pull/3292), [#3261](https://github.com/adap/" -"flower/pull/3261))" -msgstr "" - -#: ../../source/ref-changelog.md:75 ../../source/ref-changelog.md:1058 -msgid "Deprecations" +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." 
msgstr "" -#: ../../source/ref-changelog.md:77 -msgid "**Deprecate Python 3.8 support**" +#: ../../source/ref-changelog.md:986 +msgid "" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/ref-changelog.md:79 +#: ../../source/ref-changelog.md:988 msgid "" -"Python 3.8 will stop receiving security fixes in [October 2024](https://" -"devguide.python.org/versions/). Support for Python 3.8 is now deprecated and " -"will be removed in an upcoming release." +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: ../../source/ref-changelog.md:81 +#: ../../source/ref-changelog.md:990 msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-api` " -"([#3416](https://github.com/adap/flower/pull/3416), [#3420](https://github." -"com/adap/flower/pull/3420))" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:83 +#: ../../source/ref-changelog.md:992 msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api` " -"and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: ../../source/ref-changelog.md:85 +#: ../../source/ref-changelog.md:994 msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` ([#3518](https://" -"github.com/adap/flower/pull/3518))" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:87 +#: ../../source/ref-changelog.md:996 msgid "" -"The commands `flower-server-app` and `flower-client-app` should use `--" -"superlink` instead of the now deprecated `--server`. Support for `--server` " -"will be removed in a future release." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" +#: ../../source/ref-changelog.md:998 +msgid "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/ref-changelog.md:91 +#: ../../source/ref-changelog.md:1000 msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** `--" -"ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` ([#3512]" -"(https://github.com/adap/flower/pull/3512), [#3408](https://github.com/adap/" -"flower/pull/3408))" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." 
msgstr "" -#: ../../source/ref-changelog.md:93 +#: ../../source/ref-changelog.md:1002 msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, `--ssl-" -"certfile` and `--ssl-keyfile`) with one value each. Check out the [SSL " -"connections](https://flower.ai/docs/framework/how-to-enable-ssl-connections." -"html) documentation page for details." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:95 +#: ../../source/ref-changelog.md:1004 msgid "" -"**Remove SuperLink** `--vce` **option** ([#3513](https://github.com/adap/" -"flower/pull/3513))" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -#: ../../source/ref-changelog.md:97 +#: ../../source/ref-changelog.md:1008 msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for simulation, " -"simulations must now be started using the single `flower-simulation` command." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/ref-changelog.md:99 +#: ../../source/ref-changelog.md:1010 msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** ([#3527]" -"(https://github.com/adap/flower/pull/3527))" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/ref-changelog.md:101 +#: ../../source/ref-changelog.md:1012 msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of CLI " -"options for gRPC and REST were merged into one unified set of options. " -"Consult the [Flower CLI reference documentation](https://flower.ai/docs/" -"framework/ref-api-cli.html) for details." +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/ref-changelog.md:103 -msgid "v1.8.0 (2024-04-03)" +#: ../../source/ref-changelog.md:1014 +msgid "" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/ref-changelog.md:109 +#: ../../source/ref-changelog.md:1016 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear Ashimine`, " -"`Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, `Sebastian van der " -"Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, `tabdar-khan` " +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:1018 msgid "" -"**Introduce Flower Next high-level API (stable)** ([#3002](https://github." 
-"com/adap/flower/pull/3002), [#2934](https://github.com/adap/flower/" -"pull/2934), [#2958](https://github.com/adap/flower/pull/2958), [#3173]" -"(https://github.com/adap/flower/pull/3173), [#3174](https://github.com/adap/" -"flower/pull/3174), [#2923](https://github.com/adap/flower/pull/2923), [#2691]" -"(https://github.com/adap/flower/pull/2691), [#3079](https://github.com/adap/" -"flower/pull/3079), [#2961](https://github.com/adap/flower/pull/2961), [#2924]" -"(https://github.com/adap/flower/pull/2924), [#3166](https://github.com/adap/" -"flower/pull/3166), [#3031](https://github.com/adap/flower/pull/3031), [#3057]" -"(https://github.com/adap/flower/pull/3057), [#3000](https://github.com/adap/" -"flower/pull/3000), [#3113](https://github.com/adap/flower/pull/3113), [#2957]" -"(https://github.com/adap/flower/pull/2957), [#3183](https://github.com/adap/" -"flower/pull/3183), [#3180](https://github.com/adap/flower/pull/3180), [#3035]" -"(https://github.com/adap/flower/pull/3035), [#3189](https://github.com/adap/" -"flower/pull/3189), [#3185](https://github.com/adap/flower/pull/3185), [#3190]" -"(https://github.com/adap/flower/pull/3190), [#3191](https://github.com/adap/" -"flower/pull/3191), [#3195](https://github.com/adap/flower/pull/3195), [#3197]" -"(https://github.com/adap/flower/pull/3197))" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:1020 msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. You " -"can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or `quickstart-" -"tensorflow`, a detailed migration guide will follow shortly). Flower Next " -"allows you to run multiple projects concurrently (we call this multi-run) " -"and execute the same project in either simulation environments or deployment " -"environments without having to change a single line of code. The best part? " -"It's fully compatible with existing Flower projects that use `Strategy`, " -"`NumPyClient` & co." +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/ref-changelog.md:117 +#: ../../source/ref-changelog.md:1022 msgid "" -"**Introduce Flower Next low-level API (preview)** ([#3062](https://github." -"com/adap/flower/pull/3062), [#3034](https://github.com/adap/flower/" -"pull/3034), [#3069](https://github.com/adap/flower/pull/3069))" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/ref-changelog.md:119 -msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the new " -"Flower Next *low-level* API. The low-level API allows for granular control " -"of every aspect of the learning process by sending/receiving individual " -"messages to/from client nodes. 
The new `ServerApp` supports registering a " -"custom `main` function that allows writing custom training loops for methods " -"like async FL, cyclic training, or federated analytics. The new `ClientApp` " -"supports registering `train`, `evaluate` and `query` functions that can " -"access the raw message received from the `ServerApp`. New abstractions like " -"`RecordSet`, `Message` and `Context` further enable sending multiple models, " -"multiple sets of config values and metrics, stateful computations on the " -"client node and implementations of custom SMPC protocols, to name just a few." +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-changelog.md:121 -msgid "" -"**Introduce Flower Mods (preview)** ([#3054](https://github.com/adap/flower/" -"pull/3054), [#2911](https://github.com/adap/flower/pull/2911), [#3083]" -"(https://github.com/adap/flower/pull/3083))" +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:123 -msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable modules " -"that work across different projects. Flower 1.8 already includes mods to log " -"the size of a message, the number of parameters sent over the network, " -"differential privacy with fixed clipping and adaptive clipping, local " -"differential privacy and secure aggregation protocols SecAgg and SecAgg+. " -"The Flower Mods API is released as a preview, but researchers can already " -"use it to experiment with arbirtrary SMPC protocols." +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:125 -msgid "" -"**Fine-tune LLMs with LLM FlowerTune** ([#3029](https://github.com/adap/" -"flower/pull/3029), [#3089](https://github.com/adap/flower/pull/3089), [#3092]" -"(https://github.com/adap/flower/pull/3092), [#3100](https://github.com/adap/" -"flower/pull/3100), [#3114](https://github.com/adap/flower/pull/3114), [#3162]" -"(https://github.com/adap/flower/pull/3162), [#3172](https://github.com/adap/" -"flower/pull/3172))" +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-changelog.md:127 -msgid "" -"We are introducing LLM FlowerTune, an introductory example that demonstrates " -"federated LLM fine-tuning of pre-trained Llama2 models on the Alpaca-GPT4 " -"dataset. The example is built to be easily adapted to use different models " -"and/or datasets. Read our blog post [LLM FlowerTune: Federated LLM Fine-" -"tuning with Flower](https://flower.ai/blog/2024-03-14-llm-flowertune-" -"federated-llm-finetuning-with-flower/) for more details." 
+#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:129 -msgid "" -"**Introduce built-in Differential Privacy (preview)** ([#2798](https://" -"github.com/adap/flower/pull/2798), [#2959](https://github.com/adap/flower/" -"pull/2959), [#3038](https://github.com/adap/flower/pull/3038), [#3147]" -"(https://github.com/adap/flower/pull/3147), [#2909](https://github.com/adap/" -"flower/pull/2909), [#2893](https://github.com/adap/flower/pull/2893), [#2892]" -"(https://github.com/adap/flower/pull/2892), [#3039](https://github.com/adap/" -"flower/pull/3039), [#3074](https://github.com/adap/flower/pull/3074))" +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/ref-changelog.md:131 +#: ../../source/ref-changelog.md:1031 msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either " -"fixed or adaptive clipping. The clipping can happen either on the server-" -"side or the client-side. Local DP does both clipping and noising on the " -"client-side. A new documentation page [explains Differential Privacy " -"approaches](https://flower.ai/docs/framework/explanation-differential-" -"privacy.html) and a new how-to guide describes [how to use the new " -"Differential Privacy components](https://flower.ai/docs/framework/how-to-use-" -"differential-privacy.html) in Flower." +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/ref-changelog.md:133 +#: ../../source/ref-changelog.md:1033 msgid "" -"**Introduce built-in Secure Aggregation (preview)** ([#3120](https://github." -"com/adap/flower/pull/3120), [#3110](https://github.com/adap/flower/" -"pull/3110), [#3108](https://github.com/adap/flower/pull/3108))" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:1035 msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure " -"aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In this " -"initial release, we inlcude support for SecAgg and SecAgg+, but more " -"protocols will be implemented shortly. We'll also add detailed docs that " -"explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation in " -"the same project." 
+"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/ref-changelog.md:137 +#: ../../source/ref-changelog.md:1037 msgid "" -"**Introduce** `flwr` **CLI (preview)** ([#2942](https://github.com/adap/" -"flower/pull/2942), [#3055](https://github.com/adap/flower/pull/3055), [#3111]" -"(https://github.com/adap/flower/pull/3111), [#3130](https://github.com/adap/" -"flower/pull/3130), [#3136](https://github.com/adap/flower/pull/3136), [#3094]" -"(https://github.com/adap/flower/pull/3094), [#3059](https://github.com/adap/" -"flower/pull/3059), [#3049](https://github.com/adap/flower/pull/3049), [#3142]" -"(https://github.com/adap/flower/pull/3142))" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -#: ../../source/ref-changelog.md:139 -msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`) " -"and then running them using the Simulation Engine (`flwr run`)." +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" msgstr "" -#: ../../source/ref-changelog.md:141 +#: ../../source/ref-changelog.md:1041 msgid "" -"**Introduce Flower Next Simulation Engine** ([#3024](https://github.com/adap/" -"flower/pull/3024), [#3061](https://github.com/adap/flower/pull/3061), [#2997]" -"(https://github.com/adap/flower/pull/2997), [#2783](https://github.com/adap/" -"flower/pull/2783), [#3184](https://github.com/adap/flower/pull/3184), [#3075]" -"(https://github.com/adap/flower/pull/3075), [#3047](https://github.com/adap/" -"flower/pull/3047), [#2998](https://github.com/adap/flower/pull/2998), [#3009]" -"(https://github.com/adap/flower/pull/3009), [#3008](https://github.com/adap/" -"flower/pull/3008))" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/ref-changelog.md:143 +#: ../../source/ref-changelog.md:1042 msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For notebook " -"environments, there's also a new `run_simulation` function that can run " -"`ServerApp` and `ClientApp`." 
+"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:1043 msgid "" -"**Handle SuperNode connection errors** ([#2969](https://github.com/adap/" -"flower/pull/2969))" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:1044 msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in case " -"of connection errors. The arguments `--max-retries` and `--max-wait-time` " -"can now be passed to the `flower-client-app` command. `--max-retries` will " -"define the number of tentatives the client should make before it gives up " -"trying to reconnect to the SuperLink, and, `--max-wait-time` defines the " -"time before the SuperNode gives up trying to reconnect to the SuperLink." +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/ref-changelog.md:149 +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" +msgstr "" + +#: ../../source/ref-changelog.md:1050 msgid "" -"**General updates to Flower Baselines** ([#2904](https://github.com/adap/" -"flower/pull/2904), [#2482](https://github.com/adap/flower/pull/2482), [#2985]" -"(https://github.com/adap/flower/pull/2985), [#2968](https://github.com/adap/" -"flower/pull/2968))" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/ref-changelog.md:151 +#: ../../source/ref-changelog.md:1052 msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." 
msgstr "" -#: ../../source/ref-changelog.md:153 +#: ../../source/ref-changelog.md:1054 msgid "" -"**Improve documentation and translations** ([#3050](https://github.com/adap/" -"flower/pull/3050), [#3044](https://github.com/adap/flower/pull/3044), [#3043]" -"(https://github.com/adap/flower/pull/3043), [#2986](https://github.com/adap/" -"flower/pull/2986), [#3041](https://github.com/adap/flower/pull/3041), [#3046]" -"(https://github.com/adap/flower/pull/3046), [#3042](https://github.com/adap/" -"flower/pull/3042), [#2978](https://github.com/adap/flower/pull/2978), [#2952]" -"(https://github.com/adap/flower/pull/2952), [#3167](https://github.com/adap/" -"flower/pull/3167), [#2953](https://github.com/adap/flower/pull/2953), [#3045]" -"(https://github.com/adap/flower/pull/3045), [#2654](https://github.com/adap/" -"flower/pull/2654), [#3082](https://github.com/adap/flower/pull/3082), [#2990]" -"(https://github.com/adap/flower/pull/2990), [#2989](https://github.com/adap/" -"flower/pull/2989))" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/ref-changelog.md:155 +#: ../../source/ref-changelog.md:1056 msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der Voort](https://" -"github.com/svdvoort) for landing a big documentation PR!" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/ref-changelog.md:157 +#: ../../source/ref-changelog.md:1058 msgid "" -"**General updates to Flower Examples** ([3134](https://github.com/adap/" -"flower/pull/3134), [2996](https://github.com/adap/flower/pull/2996), [2930]" -"(https://github.com/adap/flower/pull/2930), [2967](https://github.com/adap/" -"flower/pull/2967), [2467](https://github.com/adap/flower/pull/2467), [2910]" -"(https://github.com/adap/flower/pull/2910), [#2918](https://github.com/adap/" -"flower/pull/2918), [#2773](https://github.com/adap/flower/pull/2773), [#3063]" -"(https://github.com/adap/flower/pull/3063), [#3116](https://github.com/adap/" -"flower/pull/3116), [#3117](https://github.com/adap/flower/pull/3117))" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:1060 msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) and " -"federated learning in a medical context using the popular MONAI library. " -"`quickstart-pytorch` and `quickstart-tensorflow` demonstrate the new Flower " -"Next `ServerApp` and `ClientApp`. Many other examples received considerable " -"updates as well." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." 
msgstr "" -#: ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:1062 msgid "" -"**General improvements** ([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), [3003](https://github.com/" -"adap/flower/pull/3003), [3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), [3085](https://github.com/" -"adap/flower/pull/3085), [3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), [2991](https://github.com/" -"adap/flower/pull/2991), [2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), [3086](https://github.com/" -"adap/flower/pull/3086), [2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), [2941](https://github.com/" -"adap/flower/pull/2941), [2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), [2973](https://github.com/" -"adap/flower/pull/2973), [2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), [3040](https://github.com/" -"adap/flower/pull/3040), [3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), [2902](https://github.com/" -"adap/flower/pull/2902), [2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), [3132](https://github.com/" -"adap/flower/pull/3132), [3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), [3064](https://github.com/" -"adap/flower/pull/3064), [3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), [3178](https://github.com/" -"adap/flower/pull/3178), [2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), [3091](https://github.com/" -"adap/flower/pull/3091), [3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), [3013](https://github.com/" -"adap/flower/pull/3013), [3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), [3068](https://github.com/" -"adap/flower/pull/3068), [2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), [2984](https://github.com/" -"adap/flower/pull/2984), [2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), [3143](https://github.com/" -"adap/flower/pull/3143), [2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), [2927](https://github.com/" -"adap/flower/pull/2927), [2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), [2912](https://github.com/" -"adap/flower/pull/2912), [3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), [2922](https://github.com/" -"adap/flower/pull/2922), [2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), [3179](https://github.com/" -"adap/flower/pull/3179), [3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), [3187](https://github.com/" -"adap/flower/pull/3187), [2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), 
[3144](https://github.com/" -"adap/flower/pull/3144), [3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), [#2836](https://github." -"com/adap/flower/pull/2836), [#2929](https://github.com/adap/flower/" -"pull/2929), [#2943](https://github.com/adap/flower/pull/2943), [#2955]" -"(https://github.com/adap/flower/pull/2955), [#2954](https://github.com/adap/" -"flower/pull/2954))" -msgstr "" - -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/ref-changelog.md:167 -msgid "v1.7.0 (2024-02-05)" +#: ../../source/ref-changelog.md:1064 +msgid "" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:1066 msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles Beauville`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S Chaitanya Kumar`, `Mohammad " -"Naseri`, `Nikos Vlachakis`, `Pritam Neog`, `Robert Kuska`, `Robert Steiner`, " -"`Taner Topal`, `Yahia Salaheldin Shaaban`, `Yan Gao`, `Yasar Abbas` " +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/ref-changelog.md:177 +#: ../../source/ref-changelog.md:1068 msgid "" -"**Introduce stateful clients (experimental)** ([#2770](https://github.com/" -"adap/flower/pull/2770), [#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), [#2643](https://github." -"com/adap/flower/pull/2643), [#2769](https://github.com/adap/flower/" -"pull/2769))" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: ../../source/ref-changelog.md:179 +#: ../../source/ref-changelog.md:1070 msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via `start_simulation`) " -"and networked clients (via `start_client`). It's also the first preview of " -"new abstractions like `Context` and `RecordSet`. Clients can access state of " -"type `RecordSet` via `state: RecordSet = self.context.state`. Changes to " -"this `RecordSet` are preserved across different rounds of execution to " -"enable stateful computations in a unified way across simulation and " -"deployment." 
+"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/ref-changelog.md:181 +#: ../../source/ref-changelog.md:1072 msgid "" -"**Improve performance** ([#2293](https://github.com/adap/flower/pull/2293))" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:1074 msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-place " -"aggregation to reduce memory consumption. The Flower client serialization/" -"deserialization has been rewritten from the ground up, which results in " -"significant speedups, especially when the client-side training time is short." +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:1076 msgid "" -"**Support Federated Learning with Apple MLX and Flower** ([#2693](https://" -"github.com/adap/flower/pull/2693))" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/ref-changelog.md:187 +#: ../../source/ref-changelog.md:1078 msgid "" -"Flower has official support for federated learning using [Apple MLX](https://" -"ml-explore.github.io/mlx) via the new `quickstart-mlx` code example." +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:189 +#: ../../source/ref-changelog.md:1080 msgid "" -"**Introduce new XGBoost cyclic strategy** ([#2666](https://github.com/adap/" -"flower/pull/2666), [#2668](https://github.com/adap/flower/pull/2668))" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-changelog.md:191 +#: ../../source/ref-changelog.md:1082 msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of " -"training (often called cyclic). The `xgboost-comprehensive` code example " -"shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower offers " -"best-in-class XGBoost support." +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/ref-changelog.md:193 +#: ../../source/ref-changelog.md:1084 msgid "" -"**Support Python 3.11** ([#2394](https://github.com/adap/flower/pull/2394))" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/ref-changelog.md:195 +#: ../../source/ref-changelog.md:1086 msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will ensure " -"better support for users using more recent Python versions." 
+"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/ref-changelog.md:197 +#: ../../source/ref-changelog.md:1088 msgid "" -"**Update gRPC and ProtoBuf dependencies** ([#2814](https://github.com/adap/" -"flower/pull/2814))" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:1090 msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/ref-changelog.md:201 +#: ../../source/ref-changelog.md:1092 msgid "" -"**Introduce Docker image for Flower server** ([#2700](https://github.com/" -"adap/flower/pull/2700), [#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), [#2695](https://github." -"com/adap/flower/pull/2695), [#2747](https://github.com/adap/flower/" -"pull/2747), [#2746](https://github.com/adap/flower/pull/2746), [#2680]" -"(https://github.com/adap/flower/pull/2680), [#2682](https://github.com/adap/" -"flower/pull/2682), [#2701](https://github.com/adap/flower/pull/2701))" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -#: ../../source/ref-changelog.md:203 +#: ../../source/ref-changelog.md:1096 msgid "" -"The Flower server can now be run using an official Docker image. A new how-" -"to guide explains [how to run Flower using Docker](https://flower.ai/docs/" -"framework/how-to-run-flower-using-docker.html). An official Flower client " -"Docker image will follow." +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:1097 msgid "" -"**Introduce** `flower-via-docker-compose` **example** ([#2626](https://" -"github.com/adap/flower/pull/2626))" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/ref-changelog.md:207 +#: ../../source/ref-changelog.md:1098 msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** ([#2719](https://" -"github.com/adap/flower/pull/2719))" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/ref-changelog.md:209 +#: ../../source/ref-changelog.md:1099 msgid "" -"**Introduce** `custom-metrics` **example** ([#1958](https://github.com/adap/" -"flower/pull/1958))" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-changelog.md:1100 msgid "" -"**Update code examples to use Flower Datasets** ([#2450](https://github.com/" -"adap/flower/pull/2450), [#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), [#2712](https://github." 
-"com/adap/flower/pull/2712))" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-changelog.md:213 +#: ../../source/ref-changelog.md:1104 msgid "" -"Several code examples were updated to use [Flower Datasets](https://flower." -"ai/docs/datasets/)." +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-changelog.md:215 +#: ../../source/ref-changelog.md:1105 msgid "" -"**General updates to Flower Examples** ([#2381](https://github.com/adap/" -"flower/pull/2381), [#2805](https://github.com/adap/flower/pull/2805), [#2782]" -"(https://github.com/adap/flower/pull/2782), [#2806](https://github.com/adap/" -"flower/pull/2806), [#2829](https://github.com/adap/flower/pull/2829), [#2825]" -"(https://github.com/adap/flower/pull/2825), [#2816](https://github.com/adap/" -"flower/pull/2816), [#2726](https://github.com/adap/flower/pull/2726), [#2659]" -"(https://github.com/adap/flower/pull/2659), [#2655](https://github.com/adap/" -"flower/pull/2655))" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-changelog.md:217 -msgid "Many Flower code examples received substantial updates." +#: ../../source/ref-changelog.md:1106 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 -msgid "**Update Flower Baselines**" +#: ../../source/ref-changelog.md:1107 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-changelog.md:1108 msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), [#2771]" -"(https://github.com/adap/flower/pull/2771))" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/ref-changelog.md:222 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: ../../source/ref-changelog.md:1109 +msgid "" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:223 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: ../../source/ref-changelog.md:1110 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:224 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: ../../source/ref-changelog.md:1111 +msgid "" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/ref-changelog.md:225 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/ref-changelog.md:226 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: ../../source/ref-changelog.md:1117 +msgid "" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " 
+"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/ref-changelog.md:228 -msgid "" -"**Improve documentation** ([#2674](https://github.com/adap/flower/" -"pull/2674), [#2480](https://github.com/adap/flower/pull/2480), [#2826]" -"(https://github.com/adap/flower/pull/2826), [#2727](https://github.com/adap/" -"flower/pull/2727), [#2761](https://github.com/adap/flower/pull/2761), [#2900]" -"(https://github.com/adap/flower/pull/2900))" -msgstr "" - -#: ../../source/ref-changelog.md:230 -msgid "" -"**Improved testing and development infrastructure** ([#2797](https://github." -"com/adap/flower/pull/2797), [#2676](https://github.com/adap/flower/" -"pull/2676), [#2644](https://github.com/adap/flower/pull/2644), [#2656]" -"(https://github.com/adap/flower/pull/2656), [#2848](https://github.com/adap/" -"flower/pull/2848), [#2675](https://github.com/adap/flower/pull/2675), [#2735]" -"(https://github.com/adap/flower/pull/2735), [#2767](https://github.com/adap/" -"flower/pull/2767), [#2732](https://github.com/adap/flower/pull/2732), [#2744]" -"(https://github.com/adap/flower/pull/2744), [#2681](https://github.com/adap/" -"flower/pull/2681), [#2699](https://github.com/adap/flower/pull/2699), [#2745]" -"(https://github.com/adap/flower/pull/2745), [#2734](https://github.com/adap/" -"flower/pull/2734), [#2731](https://github.com/adap/flower/pull/2731), [#2652]" -"(https://github.com/adap/flower/pull/2652), [#2720](https://github.com/adap/" -"flower/pull/2720), [#2721](https://github.com/adap/flower/pull/2721), [#2717]" -"(https://github.com/adap/flower/pull/2717), [#2864](https://github.com/adap/" -"flower/pull/2864), [#2694](https://github.com/adap/flower/pull/2694), [#2709]" -"(https://github.com/adap/flower/pull/2709), [#2658](https://github.com/adap/" -"flower/pull/2658), [#2796](https://github.com/adap/flower/pull/2796), [#2692]" -"(https://github.com/adap/flower/pull/2692), [#2657](https://github.com/adap/" -"flower/pull/2657), [#2813](https://github.com/adap/flower/pull/2813), [#2661]" -"(https://github.com/adap/flower/pull/2661), [#2398](https://github.com/adap/" -"flower/pull/2398))" -msgstr "" - -#: ../../source/ref-changelog.md:232 -msgid "" -"The Flower testing and development infrastructure has received substantial " -"updates. This makes Flower 1.7 the most tested release ever." -msgstr "" - -#: ../../source/ref-changelog.md:234 -msgid "" -"**Update dependencies** ([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), [#2739](https://github." 
-"com/adap/flower/pull/2739), [#2837](https://github.com/adap/flower/" -"pull/2837), [#2788](https://github.com/adap/flower/pull/2788), [#2811]" -"(https://github.com/adap/flower/pull/2811), [#2774](https://github.com/adap/" -"flower/pull/2774), [#2790](https://github.com/adap/flower/pull/2790), [#2751]" -"(https://github.com/adap/flower/pull/2751), [#2850](https://github.com/adap/" -"flower/pull/2850), [#2812](https://github.com/adap/flower/pull/2812), [#2872]" -"(https://github.com/adap/flower/pull/2872), [#2736](https://github.com/adap/" -"flower/pull/2736), [#2756](https://github.com/adap/flower/pull/2756), [#2857]" -"(https://github.com/adap/flower/pull/2857), [#2757](https://github.com/adap/" -"flower/pull/2757), [#2810](https://github.com/adap/flower/pull/2810), [#2740]" -"(https://github.com/adap/flower/pull/2740), [#2789](https://github.com/adap/" -"flower/pull/2789))" -msgstr "" - -#: ../../source/ref-changelog.md:236 -msgid "" -"**General improvements** ([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), [#2877](https://github." -"com/adap/flower/pull/2877), [#2690](https://github.com/adap/flower/" -"pull/2690), [#2889](https://github.com/adap/flower/pull/2889), [#2874]" -"(https://github.com/adap/flower/pull/2874), [#2819](https://github.com/adap/" -"flower/pull/2819), [#2689](https://github.com/adap/flower/pull/2689), [#2457]" -"(https://github.com/adap/flower/pull/2457), [#2870](https://github.com/adap/" -"flower/pull/2870), [#2669](https://github.com/adap/flower/pull/2669), [#2876]" -"(https://github.com/adap/flower/pull/2876), [#2885](https://github.com/adap/" -"flower/pull/2885), [#2858](https://github.com/adap/flower/pull/2858), [#2867]" -"(https://github.com/adap/flower/pull/2867), [#2351](https://github.com/adap/" -"flower/pull/2351), [#2886](https://github.com/adap/flower/pull/2886), [#2860]" -"(https://github.com/adap/flower/pull/2860), [#2828](https://github.com/adap/" -"flower/pull/2828), [#2869](https://github.com/adap/flower/pull/2869), [#2875]" -"(https://github.com/adap/flower/pull/2875), [#2733](https://github.com/adap/" -"flower/pull/2733), [#2488](https://github.com/adap/flower/pull/2488), [#2646]" -"(https://github.com/adap/flower/pull/2646), [#2879](https://github.com/adap/" -"flower/pull/2879), [#2821](https://github.com/adap/flower/pull/2821), [#2855]" -"(https://github.com/adap/flower/pull/2855), [#2800](https://github.com/adap/" -"flower/pull/2800), [#2807](https://github.com/adap/flower/pull/2807), [#2801]" -"(https://github.com/adap/flower/pull/2801), [#2804](https://github.com/adap/" -"flower/pull/2804), [#2851](https://github.com/adap/flower/pull/2851), [#2787]" -"(https://github.com/adap/flower/pull/2787), [#2852](https://github.com/adap/" -"flower/pull/2852), [#2672](https://github.com/adap/flower/pull/2672), [#2759]" -"(https://github.com/adap/flower/pull/2759))" -msgstr "" - -#: ../../source/ref-changelog.md:240 -msgid "" -"**Deprecate** `start_numpy_client` ([#2563](https://github.com/adap/flower/" -"pull/2563), [#2718](https://github.com/adap/flower/pull/2718))" -msgstr "" - -#: ../../source/ref-changelog.md:242 +#: ../../source/ref-changelog.md:1119 msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we have " -"introduced changes, and now all client types should start via " -"`start_client`. 
To continue using `NumPyClient` clients, you simply need to " -"first call the `.to_client()` method and then pass returned `Client` object " -"to `start_client`. The examples and the documentation have been updated " -"accordingly." +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -#: ../../source/ref-changelog.md:244 +#: ../../source/ref-changelog.md:1121 msgid "" -"**Deprecate legacy DP wrappers** ([#2749](https://github.com/adap/flower/" -"pull/2749))" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/ref-changelog.md:246 +#: ../../source/ref-changelog.md:1123 msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is in " -"preparation for an all-new pluggable version of differential privacy support " -"in Flower." +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -#: ../../source/ref-changelog.md:248 +#: ../../source/ref-changelog.md:1125 msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/ref-changelog.md:250 +#: ../../source/ref-changelog.md:1127 msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` ([#2890]" -"(https://github.com/adap/flower/pull/2890))" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -#: ../../source/ref-changelog.md:252 +#: ../../source/ref-changelog.md:1129 msgid "" -"**Drop experimental** `Task` **fields** ([#2866](https://github.com/adap/" -"flower/pull/2866), [#2865](https://github.com/adap/flower/pull/2865))" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/ref-changelog.md:254 +#: ../../source/ref-changelog.md:1131 msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed fields " -"are superseded by the new `RecordSet` abstraction." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms, and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -#: ../../source/ref-changelog.md:256 +#: ../../source/ref-changelog.md:1133 msgid "" -"**Retire MXNet examples** ([#2724](https://github.com/adap/flower/pull/2724))" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrates the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: ../../source/ref-changelog.md:258 +#: ../../source/ref-changelog.md:1135 msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." 
+"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/ref-changelog.md:260 -msgid "v1.6.0 (2023-11-28)" +#: ../../source/ref-changelog.md:1137 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -#: ../../source/ref-changelog.md:266 +#: ../../source/ref-changelog.md:1139 msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`, " -"`Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, `Steve " -"Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, `cnxdeveloper`, " -"`k3nfalt` " +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/ref-changelog.md:270 +#: ../../source/ref-changelog.md:1141 msgid "" -"**Add experimental support for Python 3.12** ([#2565](https://github.com/" -"adap/flower/pull/2565))" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/ref-changelog.md:272 +#: ../../source/ref-changelog.md:1143 msgid "" -"**Add new XGBoost examples** ([#2612](https://github.com/adap/flower/" -"pull/2612), [#2554](https://github.com/adap/flower/pull/2554), [#2617]" -"(https://github.com/adap/flower/pull/2617), [#2618](https://github.com/adap/" -"flower/pull/2618), [#2619](https://github.com/adap/flower/pull/2619), [#2567]" -"(https://github.com/adap/flower/pull/2567))" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/ref-changelog.md:274 +#: ../../source/ref-changelog.md:1145 msgid "" -"We have added a new `xgboost-quickstart` example alongside a new `xgboost-" -"comprehensive` example that goes more in-depth." +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/ref-changelog.md:276 +#: ../../source/ref-changelog.md:1147 msgid "" -"**Add Vertical FL example** ([#2598](https://github.com/adap/flower/" -"pull/2598))" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/ref-changelog.md:278 +#: ../../source/ref-changelog.md:1149 msgid "" -"We had many questions about Vertical Federated Learning using Flower, so we " -"decided to add an simple example for it on the [Titanic dataset](https://www." -"kaggle.com/competitions/titanic/data) alongside a tutorial (in the README)." +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." 
msgstr "" -#: ../../source/ref-changelog.md:280 +#: ../../source/ref-changelog.md:1151 msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` ([#2292](https://" -"github.com/adap/flower/pull/2292))" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/ref-changelog.md:282 +#: ../../source/ref-changelog.md:1153 msgid "" -"**Update REST API to support create and delete nodes** ([#2283](https://" -"github.com/adap/flower/pull/2283))" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/ref-changelog.md:284 +#: ../../source/ref-changelog.md:1155 msgid "" -"**Update the Android SDK** ([#2187](https://github.com/adap/flower/" -"pull/2187))" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-changelog.md:286 -msgid "Add gRPC request-response capability to the Android SDK." +#: ../../source/ref-changelog.md:1157 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -#: ../../source/ref-changelog.md:288 +#: ../../source/ref-changelog.md:1159 msgid "" -"**Update the C++ SDK** ([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), [#2523](https://github." -"com/adap/flower/pull/2523), [#2522](https://github.com/adap/flower/" -"pull/2522))" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." +#: ../../source/ref-changelog.md:1161 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/ref-changelog.md:292 +#: ../../source/ref-changelog.md:1165 msgid "" -"**Make HTTPS the new default** ([#2591](https://github.com/adap/flower/" -"pull/2591), [#2636](https://github.com/adap/flower/pull/2636))" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-changelog.md:294 +#: ../../source/ref-changelog.md:1166 msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP for " -"prototyping. The same applies to `flower-client`, which can either use user-" -"provided credentials or gRPC-bundled certificates to connect to an HTTPS-" -"enabled server or requires opt-out via passing `--insecure` to enable " -"insecure HTTP connections." 
+"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-changelog.md:296 +#: ../../source/ref-changelog.md:1167 msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` will " -"still start in insecure mode by default. In a future release, insecure " -"connections will require user opt-in by passing `insecure=True`." +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-changelog.md:298 +#: ../../source/ref-changelog.md:1168 msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), [#2493](https://github." -"com/adap/flower/pull/2493))" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-changelog.md:300 +#: ../../source/ref-changelog.md:1169 msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as standalone " -"processes (i.e. via `start_client`) or in simulation (i.e. via " -"`start_simulation`) without requiring changes to how the client class is " -"defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-changelog.md:302 +#: ../../source/ref-changelog.md:1170 msgid "" -"**Add new** `Bulyan` **strategy** ([#1817](https://github.com/adap/flower/" -"pull/1817), [#1891](https://github.com/adap/flower/pull/1891))" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-changelog.md:304 +#: ../../source/ref-changelog.md:1171 msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., 2018]" -"(https://arxiv.org/abs/1802.07927)" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -#: ../../source/ref-changelog.md:306 +#: ../../source/ref-changelog.md:1175 msgid "" -"**Add new** `XGB Bagging` **strategy** ([#2611](https://github.com/adap/" -"flower/pull/2611))" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 +#: ../../source/ref-changelog.md:1177 msgid "" -"**Introduce `WorkloadState`** ([#2564](https://github.com/adap/flower/" -"pull/2564), [#2632](https://github.com/adap/flower/pull/2632))" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." 
msgstr "" -#: ../../source/ref-changelog.md:314 -msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), [#2286](https://" -"github.com/adap/flower/pull/2286), [#2509](https://github.com/adap/flower/" -"pull/2509))" +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-changelog.md:316 +#: ../../source/ref-changelog.md:1183 msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), [#2400]" -"(https://github.com/adap/flower/pull/2400))" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:318 +#: ../../source/ref-changelog.md:1185 msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), [#2507](https://" -"github.com/adap/flower/pull/2507))" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/ref-changelog.md:320 +#: ../../source/ref-changelog.md:1187 msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), [#2508](https://" -"github.com/adap/flower/pull/2508))" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -#: ../../source/ref-changelog.md:322 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: ../../source/ref-changelog.md:1189 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-changelog.md:324 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: ../../source/ref-changelog.md:1191 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:326 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: ../../source/ref-changelog.md:1192 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. 
" +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:328 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: ../../source/ref-changelog.md:1194 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-changelog.md:330 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-changelog.md:332 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: ../../source/ref-changelog.md:1198 +msgid "" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-changelog.md:334 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/ref-changelog.md:336 +#: ../../source/ref-changelog.md:1204 msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), [#2615](https://" -"github.com/adap/flower/pull/2615))" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-changelog.md:338 +#: ../../source/ref-changelog.md:1205 msgid "" -"**General updates to Flower Examples** ([#2384](https://github.com/adap/" -"flower/pull/2384), [#2425](https://github.com/adap/flower/pull/2425), [#2526]" -"(https://github.com/adap/flower/pull/2526), [#2302](https://github.com/adap/" -"flower/pull/2302), [#2545](https://github.com/adap/flower/pull/2545))" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-changelog.md:1206 msgid "" -"**General updates to Flower Baselines** ([#2301](https://github.com/adap/" -"flower/pull/2301), [#2305](https://github.com/adap/flower/pull/2305), [#2307]" -"(https://github.com/adap/flower/pull/2307), [#2327](https://github.com/adap/" -"flower/pull/2327), [#2435](https://github.com/adap/flower/pull/2435), [#2462]" -"(https://github.com/adap/flower/pull/2462), [#2463](https://github.com/adap/" -"flower/pull/2463), [#2461](https://github.com/adap/flower/pull/2461), [#2469]" -"(https://github.com/adap/flower/pull/2469), [#2466](https://github.com/adap/" -"flower/pull/2466), [#2471](https://github.com/adap/flower/pull/2471), [#2472]" -"(https://github.com/adap/flower/pull/2472), [#2470](https://github.com/adap/" -"flower/pull/2470))" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-changelog.md:342 +#: ../../source/ref-changelog.md:1207 msgid "" -"**General updates to the simulation engine** ([#2331](https://github.com/" -"adap/flower/pull/2331), [#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), [#2294](https://github." 
-"com/adap/flower/pull/2294))" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-changelog.md:344 +#: ../../source/ref-changelog.md:1208 msgid "" -"**General updates to Flower SDKs** ([#2288](https://github.com/adap/flower/" -"pull/2288), [#2429](https://github.com/adap/flower/pull/2429), [#2555]" -"(https://github.com/adap/flower/pull/2555), [#2543](https://github.com/adap/" -"flower/pull/2543), [#2544](https://github.com/adap/flower/pull/2544), [#2597]" -"(https://github.com/adap/flower/pull/2597), [#2623](https://github.com/adap/" -"flower/pull/2623))" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/ref-changelog.md:346 +#: ../../source/ref-changelog.md:1212 msgid "" -"**General improvements** ([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), [#2313](https://github." -"com/adap/flower/pull/2313), [#2316](https://github.com/adap/flower/" -"pull/2316), [#2317](https://github.com/adap/flower/pull/2317), [#2349]" -"(https://github.com/adap/flower/pull/2349), [#2360](https://github.com/adap/" -"flower/pull/2360), [#2402](https://github.com/adap/flower/pull/2402), [#2446]" -"(https://github.com/adap/flower/pull/2446), [#2561](https://github.com/adap/" -"flower/pull/2561), [#2273](https://github.com/adap/flower/pull/2273), [#2267]" -"(https://github.com/adap/flower/pull/2267), [#2274](https://github.com/adap/" -"flower/pull/2274), [#2275](https://github.com/adap/flower/pull/2275), [#2432]" -"(https://github.com/adap/flower/pull/2432), [#2251](https://github.com/adap/" -"flower/pull/2251), [#2321](https://github.com/adap/flower/pull/2321), [#1936]" -"(https://github.com/adap/flower/pull/1936), [#2408](https://github.com/adap/" -"flower/pull/2408), [#2413](https://github.com/adap/flower/pull/2413), [#2401]" -"(https://github.com/adap/flower/pull/2401), [#2531](https://github.com/adap/" -"flower/pull/2531), [#2534](https://github.com/adap/flower/pull/2534), [#2535]" -"(https://github.com/adap/flower/pull/2535), [#2521](https://github.com/adap/" -"flower/pull/2521), [#2553](https://github.com/adap/flower/pull/2553), [#2596]" -"(https://github.com/adap/flower/pull/2596))" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 +#: ../../source/ref-changelog.md:1214 msgid "" -"Flower received many improvements under the hood, too many to list here." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." 
msgstr "" -#: ../../source/ref-changelog.md:352 +#: ../../source/ref-changelog.md:1216 msgid "" -"**Remove support for Python 3.7** ([#2280](https://github.com/adap/flower/" -"pull/2280), [#2299](https://github.com/adap/flower/pull/2299), [#2304]" -"(https://github.com/adap/flower/pull/2304), [#2306](https://github.com/adap/" -"flower/pull/2306), [#2355](https://github.com/adap/flower/pull/2355), [#2356]" -"(https://github.com/adap/flower/pull/2356))" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-changelog.md:354 +#: ../../source/ref-changelog.md:1218 msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes " -"support. Flower now requires Python 3.8." +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/ref-changelog.md:356 +#: ../../source/ref-changelog.md:1220 msgid "" -"**Remove experimental argument** `rest` **from** `start_client` ([#2324]" -"(https://github.com/adap/flower/pull/2324))" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:358 +#: ../../source/ref-changelog.md:1222 msgid "" -"The (still experimental) argument `rest` was removed from `start_client` and " -"`start_numpy_client`. Use `transport=\"rest\"` to opt into the experimental " -"REST API instead." +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/ref-changelog.md:360 -msgid "v1.5.0 (2023-08-31)" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-changelog.md:366 +#: ../../source/ref-changelog.md:1228 msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:370 -msgid "" -"**Introduce new simulation engine** ([#1969](https://github.com/adap/flower/" -"pull/1969), [#2221](https://github.com/adap/flower/pull/2221), [#2248]" -"(https://github.com/adap/flower/pull/2248))" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-changelog.md:372 +#: ../../source/ref-changelog.md:1233 msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and " -"memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-only, " -"CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:374 +#: ../../source/ref-changelog.md:1235 msgid "" -"Comprehensive documentation includes a new [how-to run simulations](https://" -"flower.ai/docs/framework/how-to-run-simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial series](https://www." -"youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/ref-changelog.md:376 +#: ../../source/ref-changelog.md:1237 msgid "" -"**Restructure Flower Docs** ([#1824](https://github.com/adap/flower/" -"pull/1824), [#1865](https://github.com/adap/flower/pull/1865), [#1884]" -"(https://github.com/adap/flower/pull/1884), [#1887](https://github.com/adap/" -"flower/pull/1887), [#1919](https://github.com/adap/flower/pull/1919), [#1922]" -"(https://github.com/adap/flower/pull/1922), [#1920](https://github.com/adap/" -"flower/pull/1920), [#1923](https://github.com/adap/flower/pull/1923), [#1924]" -"(https://github.com/adap/flower/pull/1924), [#1962](https://github.com/adap/" -"flower/pull/1962), [#2006](https://github.com/adap/flower/pull/2006), [#2133]" -"(https://github.com/adap/flower/pull/2133), [#2203](https://github.com/adap/" -"flower/pull/2203), [#2215](https://github.com/adap/flower/pull/2215), [#2122]" -"(https://github.com/adap/flower/pull/2122), [#2223](https://github.com/adap/" -"flower/pull/2223), [#2219](https://github.com/adap/flower/pull/2219), [#2232]" -"(https://github.com/adap/flower/pull/2232), [#2233](https://github.com/adap/" -"flower/pull/2233), [#2234](https://github.com/adap/flower/pull/2234), [#2235]" -"(https://github.com/adap/flower/pull/2235), [#2237](https://github.com/adap/" -"flower/pull/2237), [#2238](https://github.com/adap/flower/pull/2238), [#2242]" -"(https://github.com/adap/flower/pull/2242), [#2231](https://github.com/adap/" -"flower/pull/2231), [#2243](https://github.com/adap/flower/pull/2243), [#2227]" -"(https://github.com/adap/flower/pull/2227))" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/ref-changelog.md:378 +#: ../../source/ref-changelog.md:1239 msgid "" -"Much effort went into a completely restructured Flower docs experience. The " -"documentation on [flower.ai/docs](https://flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS SDK, " -"and code example projects." 
+"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/ref-changelog.md:380 +#: ../../source/ref-changelog.md:1241 msgid "" -"**Introduce Flower Swift SDK** ([#1858](https://github.com/adap/flower/" -"pull/1858), [#1897](https://github.com/adap/flower/pull/1897))" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:382 +#: ../../source/ref-changelog.md:1243 msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support on " -"iOS is improving, and alongside the Swift SDK and code example, there is now " -"also an iOS quickstart tutorial." +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-changelog.md:384 +#: ../../source/ref-changelog.md:1245 msgid "" -"**Introduce Flower Android SDK** ([#2131](https://github.com/adap/flower/" -"pull/2131))" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -#: ../../source/ref-changelog.md:386 +#: ../../source/ref-changelog.md:1247 msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower support " -"on Android is improving, and alongside the Kotlin SDK and code example, " -"there is now also an Android quickstart tutorial." +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-changelog.md:388 -msgid "" -"**Introduce new end-to-end testing infrastructure** ([#1842](https://github." -"com/adap/flower/pull/1842), [#2071](https://github.com/adap/flower/" -"pull/2071), [#2072](https://github.com/adap/flower/pull/2072), [#2068]" -"(https://github.com/adap/flower/pull/2068), [#2067](https://github.com/adap/" -"flower/pull/2067), [#2069](https://github.com/adap/flower/pull/2069), [#2073]" -"(https://github.com/adap/flower/pull/2073), [#2070](https://github.com/adap/" -"flower/pull/2070), [#2074](https://github.com/adap/flower/pull/2074), [#2082]" -"(https://github.com/adap/flower/pull/2082), [#2084](https://github.com/adap/" -"flower/pull/2084), [#2093](https://github.com/adap/flower/pull/2093), [#2109]" -"(https://github.com/adap/flower/pull/2109), [#2095](https://github.com/adap/" -"flower/pull/2095), [#2140](https://github.com/adap/flower/pull/2140), [#2137]" -"(https://github.com/adap/flower/pull/2137), [#2165](https://github.com/adap/" -"flower/pull/2165))" +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-changelog.md:390 +#: ../../source/ref-changelog.md:1251 msgid "" -"A new testing infrastructure ensures that new changes stay compatible with " -"existing framework integrations or strategies." 
+"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-changelog.md:392 -msgid "**Deprecate Python 3.7**" +#: ../../source/ref-changelog.md:1255 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-changelog.md:394 +#: ../../source/ref-changelog.md:1257 msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for " -"Python 3.7 is now deprecated and will be removed in an upcoming release." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/ref-changelog.md:396 +#: ../../source/ref-changelog.md:1259 msgid "" -"**Add new** `FedTrimmedAvg` **strategy** ([#1769](https://github.com/adap/" -"flower/pull/1769), [#1853](https://github.com/adap/flower/pull/1853))" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/ref-changelog.md:398 +#: ../../source/ref-changelog.md:1261 msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, 2018]" -"(https://arxiv.org/abs/1803.01498)." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:400 -msgid "" -"**Introduce start_driver** ([#1697](https://github.com/adap/flower/" -"pull/1697))" +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-changelog.md:402 +#: ../../source/ref-changelog.md:1267 msgid "" -"In addition to `start_server` and using the raw Driver API, there is a new " -"`start_driver` function that allows for running `start_server` scripts as a " -"Flower driver with only a single-line code change. Check out the `mt-" -"pytorch` code example to see a working example using `start_driver`." +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-changelog.md:404 +#: ../../source/ref-changelog.md:1269 msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** ([#1785]" -"(https://github.com/adap/flower/pull/1785))" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/ref-changelog.md:406 +#: ../../source/ref-changelog.md:1271 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a " -"driver script. 
The included `driver.py` and `server.py` have been aligned to " -"demonstrate both the low-level way and the high-level way of building server-" -"side logic." +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/ref-changelog.md:408 +#: ../../source/ref-changelog.md:1290 msgid "" -"**Migrate experimental REST API to Starlette** ([2171](https://github.com/" -"adap/flower/pull/2171))" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/ref-changelog.md:410 +#: ../../source/ref-changelog.md:1294 msgid "" -"The (experimental) REST API used to be implemented in [FastAPI](https://" -"fastapi.tiangolo.com/), but it has now been migrated to use [Starlette]" -"(https://www.starlette.io/) directly." +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" + +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" msgstr "" -#: ../../source/ref-changelog.md:412 +#: ../../source/ref-changelog.md:1300 msgid "" -"Please note: The REST request-response API is still experimental and will " -"likely change significantly over time." +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-changelog.md:414 +#: ../../source/ref-changelog.md:1302 msgid "" -"**Introduce experimental gRPC request-response API** ([#1867](https://github." -"com/adap/flower/pull/1867), [#1901](https://github.com/adap/flower/" -"pull/1901))" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/ref-changelog.md:416 +#: ../../source/ref-changelog.md:1304 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) and " -"the experimental REST API, there is now a new gRPC API that uses a request-" -"response model to communicate with client nodes." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -#: ../../source/ref-changelog.md:418 +#: ../../source/ref-changelog.md:1306 msgid "" -"Please note: The gRPC request-response API is still experimental and will " -"likely change significantly over time." +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." 
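The "example below" referenced above was a code block in the original changelog that gettext does not carry into this catalog; the following is a hedged stand-in. It uses the current `flwr.client.NumPyClient` signatures (which differ slightly from the 0.14-era `FlwrClient` in the original example), and all weights, counts, and metric values are dummies:

```python
import flwr as fl
import numpy as np


class FlwrClient(fl.client.NumPyClient):
    """Dummy client showing the generalized return values and config types."""

    def get_parameters(self, config):
        return [np.zeros((3, 3), dtype=np.float32)]

    def fit(self, parameters, config):
        # `config` values are no longer restricted to `str`, so no casting:
        epochs = config.get("epochs", 1)  # may arrive as an int
        new_parameters = parameters       # (real training would happen here)
        num_examples = 100
        # fit: List[np.ndarray], int, Dict[str, Scalar]
        return new_parameters, num_examples, {"train_loss": 0.35, "epochs_run": epochs}

    def evaluate(self, parameters, config):
        loss, num_examples = 0.42, 50
        # evaluate: float, int, Dict[str, Scalar]
        return loss, num_examples, {"accuracy": 0.91}
```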
msgstr "" -#: ../../source/ref-changelog.md:420 +#: ../../source/ref-changelog.md:1308 msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` ([#1880](https://github.com/adap/flower/" -"pull/1880))" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:422 +#: ../../source/ref-changelog.md:1323 msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in favour " -"of a new argument `transport`. `start_client(transport=\"rest\")` will yield " -"the same behaviour as `start_client(rest=True)` did before. All code should " -"migrate to the new argument `transport`. The deprecated argument `rest` will " -"be removed in a future release." +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/ref-changelog.md:424 +#: ../../source/ref-changelog.md:1325 msgid "" -"**Add a new gRPC option** ([#2197](https://github.com/adap/flower/pull/2197))" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/ref-changelog.md:426 +#: ../../source/ref-changelog.md:1327 msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls` " -"option set to 0 by default. This prevents the clients from sending keepalive " -"pings when there is no outstanding stream." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -#: ../../source/ref-changelog.md:428 +#: ../../source/ref-changelog.md:1329 msgid "" -"**Improve example notebooks** ([#2005](https://github.com/adap/flower/" -"pull/2005))" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:430 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/ref-changelog.md:432 +#: ../../source/ref-changelog.md:1350 msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), [#1981](https://github." -"com/adap/flower/pull/1981), [#1988](https://github.com/adap/flower/" -"pull/1988), [#1984](https://github.com/adap/flower/pull/1984), [#1982]" -"(https://github.com/adap/flower/pull/1982), [#2112](https://github.com/adap/" -"flower/pull/2112), [#2144](https://github.com/adap/flower/pull/2144), [#2174]" -"(https://github.com/adap/flower/pull/2174), [#2225](https://github.com/adap/" -"flower/pull/2225), [#2183](https://github.com/adap/flower/pull/2183))" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:434 -msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. 
A " -"major upgrade is that all code examples now have a `requirements.txt` (in " -"addition to `pyproject.toml`)." +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" msgstr "" -#: ../../source/ref-changelog.md:436 -msgid "" -"**General improvements** ([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), [#1884](https://github." -"com/adap/flower/pull/1884), [#1837](https://github.com/adap/flower/" -"pull/1837), [#1477](https://github.com/adap/flower/pull/1477), [#2171]" -"(https://github.com/adap/flower/pull/2171))" +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" msgstr "" -#: ../../source/ref-changelog.md:450 +#: ../../source/ref-changelog.md:1354 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, `Iacob-" -"Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal Sarkhel`, " -"`L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic Lane`, " -"`Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, `Steve " -"Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:454 +#: ../../source/ref-changelog.md:1355 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and example)** " -"([#1694](https://github.com/adap/flower/pull/1694), [#1709](https://github." -"com/adap/flower/pull/1709), [#1715](https://github.com/adap/flower/" -"pull/1715), [#1717](https://github.com/adap/flower/pull/1717), [#1763]" -"(https://github.com/adap/flower/pull/1763), [#1795](https://github.com/adap/" -"flower/pull/1795))" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/ref-changelog.md:456 -msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg` " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/" -"strategy/fedxgb_nn_avg.py), and a [code example](https://github.com/adap/" -"flower/tree/main/examples/xgboost-quickstart) that demonstrates the usage of " -"this new strategy in an XGBoost project." +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" msgstr "" -#: ../../source/ref-changelog.md:458 +#: ../../source/ref-changelog.md:1359 msgid "" -"**Introduce iOS SDK (preview)** ([#1621](https://github.com/adap/flower/" -"pull/1621), [#1764](https://github.com/adap/flower/pull/1764))" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -#: ../../source/ref-changelog.md:460 -msgid "" -"This is a major update for anyone wanting to implement Federated Learning on " -"iOS mobile devices. We now have a swift iOS SDK present under [src/swift/" -"flwr](https://github.com/adap/flower/tree/main/src/swift/flwr) that will " -"facilitate greatly the app creating process. 
To showcase its use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/ref-changelog.md:462 -msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** ([#1657](https://" -"github.com/adap/flower/pull/1657), [#1721](https://github.com/adap/flower/" -"pull/1721))" +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" msgstr "" -#: ../../source/ref-changelog.md:464 +#: ../../source/ref-changelog.md:1365 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-what-" -"is-federated-learning.html) in our documentation explains the basics of " -"Fedetated Learning. It enables anyone who's unfamiliar with Federated " -"Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/ref-changelog.md:466 +#: ../../source/ref-changelog.md:1366 msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** ([#1513](https://github.com/" -"adap/flower/pull/1513), [#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), [#1679](https://github." -"com/adap/flower/pull/1679))" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/ref-changelog.md:468 +#: ../../source/ref-changelog.md:1367 msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated " -"Optimization in Heterogeneous Networks (Li et al., 2018)](https://arxiv.org/" -"abs/1812.06127). It uses the `FedProx` strategy, which aims at making " -"convergence more robust in heterogeneous settings." +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/ref-changelog.md:470 -msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** ([#1655](https://github." -"com/adap/flower/pull/1655))" +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/ref-changelog.md:472 -msgid "" -"This new baseline replicates an experiment evaluating the performance of the " -"FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A Benchmark " -"for Federated Settings (Caldas et al., 2018)](https://arxiv.org/" -"abs/1812.01097)." +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" msgstr "" -#: ../../source/ref-changelog.md:474 +#: ../../source/ref-changelog.md:1373 msgid "" -"**Introduce (experimental) REST API** ([#1594](https://github.com/adap/" -"flower/pull/1594), [#1690](https://github.com/adap/flower/pull/1690), [#1695]" -"(https://github.com/adap/flower/pull/1695), [#1712](https://github.com/adap/" -"flower/pull/1712), [#1802](https://github.com/adap/flower/pull/1802), [#1770]" -"(https://github.com/adap/flower/pull/1770), [#1733](https://github.com/adap/" -"flower/pull/1733))" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. 
Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/ref-changelog.md:476 -msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:478 -msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/ref-changelog.md:480 +#: ../../source/ref-changelog.md:1381 msgid "" -"**Improve the (experimental) Driver API** ([#1663](https://github.com/adap/" -"flower/pull/1663), [#1666](https://github.com/adap/flower/pull/1666), [#1667]" -"(https://github.com/adap/flower/pull/1667), [#1664](https://github.com/adap/" -"flower/pull/1664), [#1675](https://github.com/adap/flower/pull/1675), [#1676]" -"(https://github.com/adap/flower/pull/1676), [#1693](https://github.com/adap/" -"flower/pull/1693), [#1662](https://github.com/adap/flower/pull/1662), [#1794]" -"(https://github.com/adap/flower/pull/1794))" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/ref-changelog.md:482 +#: ../../source/ref-changelog.md:1382 msgid "" -"The Driver API is still an experimental feature, but this release introduces " -"some major upgrades. One of the main improvements is the introduction of an " -"SQLite database to store server state on disk (instead of in-memory). " -"Another improvement is that tasks (instructions or results) that have been " -"delivered will now be deleted. This greatly improves the memory efficiency " -"of a long-running Flower server." +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/ref-changelog.md:484 +#: ../../source/ref-changelog.md:1383 msgid "" -"**Fix spilling issues related to Ray during simulations** ([#1698](https://" -"github.com/adap/flower/pull/1698))" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/ref-changelog.md:486 +#: ../../source/ref-changelog.md:1384 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts of " -"data that would make the training unable to continue. This is now fixed! 🎉" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/ref-changelog.md:488 +#: ../../source/ref-changelog.md:1385 msgid "" -"**Add new example using** `TabNet` **and Flower** ([#1725](https://github." -"com/adap/flower/pull/1725))" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." 
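A short sketch of what the rename above looks like in a custom strategy: the `on_` prefix is simply dropped. The signatures shown are the present-day `flwr` 1.x ones rather than the historical 0.11 API, so treat this as an illustration of the naming, not a drop-in for that release:

```python
import flwr as fl


class MyFedAvg(fl.server.strategy.FedAvg):
    # Previously `on_configure_fit`; the `on_` prefix is gone.
    def configure_fit(self, server_round, parameters, client_manager):
        return super().configure_fit(server_round, parameters, client_manager)

    # Previously `on_aggregate_fit`.
    def aggregate_fit(self, server_round, results, failures):
        aggregated_parameters, metrics = super().aggregate_fit(
            server_round, results, failures
        )
        # A custom metrics dictionary can be returned alongside the parameters.
        metrics["num_fit_results"] = len(results)
        return aggregated_parameters, metrics
```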
msgstr "" -#: ../../source/ref-changelog.md:490 -msgid "" -"TabNet is a powerful and flexible framework for training machine learning " -"models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/" -"quickstart-tabnet)." +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-example-projects.rst:4 msgid "" -"**Add new how-to guide for monitoring simulations** ([#1649](https://github." -"com/adap/flower/pull/1649))" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-example-projects.rst:10 msgid "" -"We now have a documentation guide to help users monitor their performance " -"during simulations." +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "" -#: ../../source/ref-changelog.md:496 +#: ../../source/ref-example-projects.rst:14 msgid "" -"**Add training metrics to** `History` **object during simulations** ([#1696]" -"(https://github.com/adap/flower/pull/1696))" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/ref-changelog.md:498 +#: ../../source/ref-example-projects.rst:17 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training metrics, " -"but previous releases did not save the results in the `History` object. This " -"is now the case!" +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:500 +#: ../../source/ref-example-projects.rst:18 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:19 msgid "" -"**General improvements** ([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), [#1647](https://github." 
-"com/adap/flower/pull/1647), [#1471](https://github.com/adap/flower/" -"pull/1471), [#1648](https://github.com/adap/flower/pull/1648), [#1651]" -"(https://github.com/adap/flower/pull/1651), [#1652](https://github.com/adap/" -"flower/pull/1652), [#1653](https://github.com/adap/flower/pull/1653), [#1659]" -"(https://github.com/adap/flower/pull/1659), [#1665](https://github.com/adap/" -"flower/pull/1665), [#1670](https://github.com/adap/flower/pull/1670), [#1672]" -"(https://github.com/adap/flower/pull/1672), [#1677](https://github.com/adap/" -"flower/pull/1677), [#1684](https://github.com/adap/flower/pull/1684), [#1683]" -"(https://github.com/adap/flower/pull/1683), [#1686](https://github.com/adap/" -"flower/pull/1686), [#1682](https://github.com/adap/flower/pull/1682), [#1685]" -"(https://github.com/adap/flower/pull/1685), [#1692](https://github.com/adap/" -"flower/pull/1692), [#1705](https://github.com/adap/flower/pull/1705), [#1708]" -"(https://github.com/adap/flower/pull/1708), [#1711](https://github.com/adap/" -"flower/pull/1711), [#1713](https://github.com/adap/flower/pull/1713), [#1714]" -"(https://github.com/adap/flower/pull/1714), [#1718](https://github.com/adap/" -"flower/pull/1718), [#1716](https://github.com/adap/flower/pull/1716), [#1723]" -"(https://github.com/adap/flower/pull/1723), [#1735](https://github.com/adap/" -"flower/pull/1735), [#1678](https://github.com/adap/flower/pull/1678), [#1750]" -"(https://github.com/adap/flower/pull/1750), [#1753](https://github.com/adap/" -"flower/pull/1753), [#1736](https://github.com/adap/flower/pull/1736), [#1766]" -"(https://github.com/adap/flower/pull/1766), [#1760](https://github.com/adap/" -"flower/pull/1760), [#1775](https://github.com/adap/flower/pull/1775), [#1776]" -"(https://github.com/adap/flower/pull/1776), [#1777](https://github.com/adap/" -"flower/pull/1777), [#1779](https://github.com/adap/flower/pull/1779), [#1784]" -"(https://github.com/adap/flower/pull/1784), [#1773](https://github.com/adap/" -"flower/pull/1773), [#1755](https://github.com/adap/flower/pull/1755), [#1789]" -"(https://github.com/adap/flower/pull/1789), [#1788](https://github.com/adap/" -"flower/pull/1788), [#1798](https://github.com/adap/flower/pull/1798), [#1799]" -"(https://github.com/adap/flower/pull/1799), [#1739](https://github.com/adap/" -"flower/pull/1739), [#1800](https://github.com/adap/flower/pull/1800), [#1804]" -"(https://github.com/adap/flower/pull/1804), [#1805](https://github.com/adap/" -"flower/pull/1805))" +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/ref-example-projects.rst:23 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" msgstr "" -#: ../../source/ref-changelog.md:514 +#: ../../source/ref-example-projects.rst:25 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, `Daniel " -"J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/ref-changelog.md:518 +#: ../../source/ref-example-projects.rst:28 msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:520 -msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be used " -"to identify which workload a task belongs to. It also supports a new " -"`group_id` that can be used, for example, to indicate the current training " -"round. Both the `workload_id` and `group_id` enable client nodes to decide " -"whether they want to handle a task or not." +#: ../../source/ref-example-projects.rst:29 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:522 +#: ../../source/ref-example-projects.rst:33 +msgid "PyTorch: From Centralized To Federated" +msgstr "" + +#: ../../source/ref-example-projects.rst:35 msgid "" -"**Make Driver API and Fleet API address configurable** ([#1637](https://" -"github.com/adap/flower/pull/1637))" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/ref-changelog.md:524 +#: ../../source/ref-example-projects.rst:37 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) can " -"now configure the server address of both Driver API (via `--driver-api-" -"address`) and Fleet API (via `--fleet-api-address`) when starting:" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:526 +#: ../../source/ref-example-projects.rst:38 msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." +#: ../../source/ref-example-projects.rst:42 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/ref-changelog.md:530 +#: ../../source/ref-example-projects.rst:44 msgid "" -"**Add new example of Federated Learning using fastai and Flower** ([#1598]" -"(https://github.com/adap/flower/pull/1598))" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/ref-changelog.md:532 +#: ../../source/ref-example-projects.rst:46 msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/" -"quickstart-fastai)." 
+"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:534 +#: ../../source/ref-example-projects.rst:47 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest " -"versions of Android** ([#1603](https://github.com/adap/flower/pull/1603))" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:536 +#: ../../source/ref-faq.rst:4 msgid "" -"The Android code example has received a substantial update: the project is " -"compatible with Flower 1.0 (and later), the UI received a full refresh, and " -"the project is updated to be compatible with newer Android tooling." +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/ref-changelog.md:538 -msgid "" -"**Add new `FedProx` strategy** ([#1619](https://github.com/adap/flower/" -"pull/1619))" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/ref-changelog.md:540 +#: ../../source/ref-faq.rst:8 msgid "" -"This [strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/" -"strategy/fedprox.py) is almost identical to [`FedAvg`](https://github.com/" -"adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py), but helps " -"users replicate what is described in this [paper](https://arxiv.org/" -"abs/1812.06127). It essentially adds a parameter called `proximal_mu` to " -"regularize the local models with respect to the global models." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/ref-changelog.md:542 +#: ../../source/ref-faq.rst:10 msgid "" -"**Add new metrics to telemetry events** ([#1640](https://github.com/adap/" -"flower/pull/1640))" +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/ref-changelog.md:544 +#: ../../source/ref-faq.rst:11 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/ref-changelog.md:546 -msgid "" -"**Add new custom strategy tutorial section** [#1623](https://github.com/adap/" -"flower/pull/1623)" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/ref-changelog.md:548 +#: ../../source/ref-faq.rst:15 msgid "" -"The Flower tutorial now has a new section that covers implementing a custom " -"strategy from scratch: [Open in Colab](https://colab.research.google.com/" -"github/adap/flower/blob/main/doc/source/tutorial-build-a-strategy-from-" -"scratch-pytorch.ipynb)" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/ref-changelog.md:550 -msgid "" -"**Add new custom serialization tutorial section** ([#1622](https://github." -"com/adap/flower/pull/1622))" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" 
msgstr "" -#: ../../source/ref-changelog.md:552 +#: ../../source/ref-faq.rst:19 msgid "" -"The Flower tutorial now has a new section that covers custom serialization: " -"[Open in Colab](https://colab.research.google.com/github/adap/flower/blob/" -"main/doc/source/tutorial-customize-the-client-pytorch.ipynb)" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/ref-changelog.md:554 +#: ../../source/ref-faq.rst:21 msgid "" -"**General improvements** ([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), [#1636](https://github." -"com/adap/flower/pull/1636), [#1635](https://github.com/adap/flower/" -"pull/1635), [#1633](https://github.com/adap/flower/pull/1633), [#1632]" -"(https://github.com/adap/flower/pull/1632), [#1631](https://github.com/adap/" -"flower/pull/1631), [#1630](https://github.com/adap/flower/pull/1630), [#1627]" -"(https://github.com/adap/flower/pull/1627), [#1593](https://github.com/adap/" -"flower/pull/1593), [#1616](https://github.com/adap/flower/pull/1616), [#1615]" -"(https://github.com/adap/flower/pull/1615), [#1607](https://github.com/adap/" -"flower/pull/1607), [#1609](https://github.com/adap/flower/pull/1609), [#1608]" -"(https://github.com/adap/flower/pull/1608), [#1603](https://github.com/adap/" -"flower/pull/1603), [#1590](https://github.com/adap/flower/pull/1590), [#1580]" -"(https://github.com/adap/flower/pull/1580), [#1599](https://github.com/adap/" -"flower/pull/1599), [#1600](https://github.com/adap/flower/pull/1600), [#1601]" -"(https://github.com/adap/flower/pull/1601), [#1597](https://github.com/adap/" -"flower/pull/1597), [#1595](https://github.com/adap/flower/pull/1595), [#1591]" -"(https://github.com/adap/flower/pull/1591), [#1588](https://github.com/adap/" -"flower/pull/1588), [#1589](https://github.com/adap/flower/pull/1589), [#1587]" -"(https://github.com/adap/flower/pull/1587), [#1573](https://github.com/adap/" -"flower/pull/1573), [#1581](https://github.com/adap/flower/pull/1581), [#1578]" -"(https://github.com/adap/flower/pull/1578), [#1574](https://github.com/adap/" -"flower/pull/1574), [#1572](https://github.com/adap/flower/pull/1572), [#1586]" -"(https://github.com/adap/flower/pull/1586))" +"`Android Kotlin example `_" msgstr "" -#: ../../source/ref-changelog.md:558 -msgid "" -"**Updated documentation** ([#1629](https://github.com/adap/flower/" -"pull/1629), [#1628](https://github.com/adap/flower/pull/1628), [#1620]" -"(https://github.com/adap/flower/pull/1620), [#1618](https://github.com/adap/" -"flower/pull/1618), [#1617](https://github.com/adap/flower/pull/1617), [#1613]" -"(https://github.com/adap/flower/pull/1613), [#1614](https://github.com/adap/" -"flower/pull/1614))" +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 +#: ../../source/ref-faq.rst:26 msgid "" -"As usual, the documentation has improved quite a bit. It is another step in " -"our effort to make the Flower documentation the best documentation of any " -"project. Stay tuned and as always, feel free to provide feedback!" +"Yes, of course. 
A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/ref-changelog.md:572 -msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " -"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +#: ../../source/ref-faq.rst:29 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/ref-changelog.md:576 -msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** ([#1497](https://github.com/" -"adap/flower/pull/1497), [#1552](https://github.com/adap/flower/pull/1552))" +#: ../../source/ref-faq.rst:30 +msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-faq.rst:31 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in this " -"series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-pack-" -"fedavg-mnist-cnn/)" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-faq.rst:32 msgid "" -"**Improve GPU support in simulations** ([#1555](https://github.com/adap/" -"flower/pull/1555))" +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/ref-changelog.md:582 +#: ../../source/ref-faq.rst:33 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated to " -"improve GPU support. The update includes some of the hard-earned lessons " -"from scaling simulations in GPU cluster environments. New defaults make " -"running GPU-based simulations substantially more robust." +"`Flower meets KOSMoS `_." msgstr "" -#: ../../source/ref-changelog.md:584 +#: ../../source/ref-faq.rst:34 msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** ([#1527](https://" -"github.com/adap/flower/pull/1527), [#1558](https://github.com/adap/flower/" -"pull/1558))" +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-faq.rst:35 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to use " -"on GPU instances. We listened and made improvements to all of our Jupyter " -"notebooks! Check out the updated notebooks here:" +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/ref-changelog.md:588 -msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework/" -"tutorial-get-started-with-flower-pytorch.html)" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/ref-changelog.md:589 +#: ../../source/ref-telemetry.md:3 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework/tutorial-" -"use-a-federated-learning-strategy-pytorch.html)" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." 
msgstr "" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-telemetry.md:5 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a-" -"strategy-from-scratch-pytorch.html)" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -#: ../../source/ref-changelog.md:591 -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" -"the-client-pytorch.html)" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/ref-changelog.md:593 -msgid "" -"**Introduce optional telemetry** ([#1533](https://github.com/adap/flower/" -"pull/1533), [#1544](https://github.com/adap/flower/pull/1544), [#1584]" -"(https://github.com/adap/flower/pull/1584))" +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -#: ../../source/ref-changelog.md:595 +#: ../../source/ref-telemetry.md:11 msgid "" -"After a [request for feedback](https://github.com/adap/flower/issues/1534) " -"from the community, the Flower open-source project introduces optional " -"collection of *anonymous* usage metrics to make well-informed decisions to " -"improve Flower. Doing this enables the Flower team to understand how Flower " -"is used and what challenges users might face." +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -#: ../../source/ref-changelog.md:597 +#: ../../source/ref-telemetry.md:12 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.** " -"Staying true to this statement, Flower makes it easy to disable telemetry " -"for users who do not want to share anonymous usage metrics. [Read more.]" -"(https://flower.ai/docs/telemetry.html)." +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" -#: ../../source/ref-changelog.md:599 +#: ../../source/ref-telemetry.md:13 msgid "" -"**Introduce (experimental) Driver API** ([#1520](https://github.com/adap/" -"flower/pull/1520), [#1525](https://github.com/adap/flower/pull/1525), [#1545]" -"(https://github.com/adap/flower/pull/1545), [#1546](https://github.com/adap/" -"flower/pull/1546), [#1550](https://github.com/adap/flower/pull/1550), [#1551]" -"(https://github.com/adap/flower/pull/1551), [#1567](https://github.com/adap/" -"flower/pull/1567))" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -#: ../../source/ref-changelog.md:601 +#: ../../source/ref-telemetry.md:14 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API " -"will be the abstraction that many upcoming features will be built on - and " -"you can start building those things now, too." +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." 
+msgstr "" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" msgstr "" -#: ../../source/ref-changelog.md:603 +#: ../../source/ref-telemetry.md:18 msgid "" -"The Driver API also enables a new execution mode in which the server runs " -"indefinitely. Multiple individual workloads can run concurrently and start " -"and stop their execution independent of the server. This is especially " -"useful for users who want to deploy Flower in production." +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -#: ../../source/ref-changelog.md:605 +#: ../../source/ref-telemetry.md:24 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward to " -"you feedback!" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" msgstr "" -#: ../../source/ref-changelog.md:607 +#: ../../source/ref-telemetry.md:30 msgid "" -"Please note: *The Driver API is still experimental and will likely change " -"significantly over time.*" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -#: ../../source/ref-changelog.md:609 +#: ../../source/ref-telemetry.md:32 msgid "" -"**Add new Federated Analytics with Pandas example** ([#1469](https://github." -"com/adap/flower/pull/1469), [#1535](https://github.com/adap/flower/" -"pull/1535))" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -#: ../../source/ref-changelog.md:611 +#: ../../source/ref-telemetry.md:34 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics " -"with Pandas and Flower. You can find it here: [quickstart-pandas](https://" -"github.com/adap/flower/tree/main/examples/quickstart-pandas)." +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." msgstr "" -#: ../../source/ref-changelog.md:613 +#: ../../source/ref-telemetry.md:36 msgid "" -"**Add new strategies: Krum and MultiKrum** ([#1481](https://github.com/adap/" -"flower/pull/1481))" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -#: ../../source/ref-changelog.md:615 +#: ../../source/ref-telemetry.md:38 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum and " -"MultiKrum in their workloads." 
+"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" -#: ../../source/ref-changelog.md:617 +#: ../../source/ref-telemetry.md:40 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** ([#1495](https://" -"github.com/adap/flower/pull/1495))" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -#: ../../source/ref-changelog.md:619 +#: ../../source/ref-telemetry.md:42 msgid "" -"The C++ code example has received a substantial update to make it compatible " -"with the latest version of Flower." +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -#: ../../source/ref-changelog.md:621 +#: ../../source/ref-telemetry.md:44 msgid "" -"**General improvements** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), [#1506](https://github." -"com/adap/flower/pull/1506), [#1514](https://github.com/adap/flower/" -"pull/1514), [#1522](https://github.com/adap/flower/pull/1522), [#1523]" -"(https://github.com/adap/flower/pull/1523), [#1526](https://github.com/adap/" -"flower/pull/1526), [#1528](https://github.com/adap/flower/pull/1528), [#1547]" -"(https://github.com/adap/flower/pull/1547), [#1549](https://github.com/adap/" -"flower/pull/1549), [#1560](https://github.com/adap/flower/pull/1560), [#1564]" -"(https://github.com/adap/flower/pull/1564), [#1566](https://github.com/adap/" -"flower/pull/1566))" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -#: ../../source/ref-changelog.md:625 +#: ../../source/ref-telemetry.md:46 msgid "" -"**Updated documentation** ([#1494](https://github.com/adap/flower/" -"pull/1494), [#1496](https://github.com/adap/flower/pull/1496), [#1500]" -"(https://github.com/adap/flower/pull/1500), [#1503](https://github.com/adap/" -"flower/pull/1503), [#1505](https://github.com/adap/flower/pull/1505), [#1524]" -"(https://github.com/adap/flower/pull/1524), [#1518](https://github.com/adap/" -"flower/pull/1518), [#1519](https://github.com/adap/flower/pull/1519), [#1515]" -"(https://github.com/adap/flower/pull/1515))" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." 
msgstr "" -#: ../../source/ref-changelog.md:629 +#: ../../source/ref-telemetry.md:48 msgid "" -"One highlight is the new [first time contributor guide](https://flower.ai/" -"docs/first-time-contributors.html): if you've never contributed on GitHub " -"before, this is the perfect place to start!" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" -#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" msgstr "" -#: ../../source/ref-changelog.md:639 +#: ../../source/ref-telemetry.md:52 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/ref-changelog.md:641 +#: ../../source/ref-telemetry.md:58 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, `danielnugraha`, " -"`edogab33`" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/ref-changelog.md:645 -msgid "" -"**Introduce Differential Privacy wrappers (preview)** ([#1357](https://" -"github.com/adap/flower/pull/1357), [#1460](https://github.com/adap/flower/" -"pull/1460))" +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" msgstr "" -#: ../../source/ref-changelog.md:647 +#: ../../source/ref-telemetry.md:66 msgid "" -"The first (experimental) preview of pluggable Differential Privacy wrappers " -"enables easy configuration and usage of differential privacy (DP). The " -"pluggable DP wrappers enable framework-agnostic **and** strategy-agnostic " -"usage of both client-side DP and server-side DP. Head over to the Flower " -"docs, a new explainer goes into more detail." +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/ref-changelog.md:649 +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"**New iOS CoreML code example** ([#1289](https://github.com/adap/flower/" -"pull/1289))" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." msgstr "" -#: ../../source/ref-changelog.md:651 -msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can be " -"built for iOS. The code example contains both Flower iOS SDK components that " -"can be used for many tasks, and one task example running on CoreML." 
+#: ../../source/tutorial-quickstart-android.rst:5 +msgid "Quickstart Android" msgstr "" -#: ../../source/ref-changelog.md:653 +#: ../../source/tutorial-quickstart-android.rst:10 msgid "" -"**New FedMedian strategy** ([#1461](https://github.com/adap/flower/" -"pull/1461))" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/ref-changelog.md:655 +#: ../../source/tutorial-quickstart-android.rst:12 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by [Yin " -"et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -#: ../../source/ref-changelog.md:657 -msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** ([#1493](https://" -"github.com/adap/flower/pull/1493))" +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" msgstr "" -#: ../../source/ref-changelog.md:659 +#: ../../source/tutorial-quickstart-fastai.rst:7 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default and " -"not just exposed to the configured `Strategy` (via the `failures` argument)." +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -#: ../../source/ref-changelog.md:661 -msgid "" -"**Improve Virtual Client Engine internals** ([#1401](https://github.com/adap/" -"flower/pull/1401), [#1453](https://github.com/adap/flower/pull/1453))" +#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/ref-changelog.md:663 +#: ../../source/tutorial-quickstart-fastai.rst:20 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE now " -"uses Ray 2.0 under the hood, the value type of the `client_resources` " -"dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/ref-changelog.md:665 +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "그 후 가상 환경을 활성화합니다:" + +#: ../../source/tutorial-quickstart-fastai.rst:43 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual Client " -"Engine**" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" msgstr "" -#: ../../source/ref-changelog.md:667 -msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and " -"`NumPyClient`) methods." 
+#: ../../source/tutorial-quickstart-fastai.rst:56 +#: ../../source/tutorial-quickstart-huggingface.rst:65 +#: ../../source/tutorial-quickstart-mlx.rst:64 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56 +#: ../../source/tutorial-quickstart-pytorch.rst:64 +#: ../../source/tutorial-quickstart-tensorflow.rst:65 +msgid "With default arguments you will see an output like this one:" msgstr "" -#: ../../source/ref-changelog.md:669 +#: ../../source/tutorial-quickstart-fastai.rst:100 +#: ../../source/tutorial-quickstart-huggingface.rst:116 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106 +#: ../../source/tutorial-quickstart-pytorch.rst:105 +#: ../../source/tutorial-quickstart-tensorflow.rst:106 msgid "" -"**Provide type information to packages using** `flwr` ([#1377](https://" -"github.com/adap/flower/pull/1377))" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/ref-changelog.md:671 +#: ../../source/tutorial-quickstart-fastai.rst:110 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that the " -"package is typed. This enables typing support for projects or packages that " -"use `flwr` by enabling them to improve their code using static type checkers " -"like `mypy`." +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:673 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"**Updated code example** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/ref-changelog.md:675 -msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/ref-changelog.md:677 +#: ../../source/tutorial-quickstart-huggingface.rst:7 msgid "" -"**Updated documentation** ([#1355](https://github.com/adap/flower/" -"pull/1355), [#1558](https://github.com/adap/flower/pull/1558), [#1379]" -"(https://github.com/adap/flower/pull/1379), [#1380](https://github.com/adap/" -"flower/pull/1380), [#1381](https://github.com/adap/flower/pull/1381), [#1332]" -"(https://github.com/adap/flower/pull/1332), [#1391](https://github.com/adap/" -"flower/pull/1391), [#1403](https://github.com/adap/flower/pull/1403), [#1364]" -"(https://github.com/adap/flower/pull/1364), [#1409](https://github.com/adap/" -"flower/pull/1409), [#1419](https://github.com/adap/flower/pull/1419), [#1444]" -"(https://github.com/adap/flower/pull/1444), [#1448](https://github.com/adap/" -"flower/pull/1448), [#1417](https://github.com/adap/flower/pull/1417), [#1449]" -"(https://github.com/adap/flower/pull/1449), [#1465](https://github.com/adap/" -"flower/pull/1465), [#1467](https://github.com/adap/flower/pull/1467))" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." 
msgstr "" -#: ../../source/ref-changelog.md:679 +#: ../../source/tutorial-quickstart-huggingface.rst:14 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -#: ../../source/ref-changelog.md:681 +#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-quickstart-mlx.rst:19 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-quickstart-tensorflow.rst:20 msgid "" -"**Restructured documentation** ([#1387](https://github.com/adap/flower/" -"pull/1387))" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -#: ../../source/ref-changelog.md:683 +#: ../../source/tutorial-quickstart-huggingface.rst:28 msgid "" -"The documentation has been restructured to make it easier to navigate. This " -"is just the first step in a larger effort to make the Flower documentation " -"the best documentation of any project ever. Stay tuned!" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:685 +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 msgid "" -"**Open in Colab button** ([#1389](https://github.com/adap/flower/pull/1389))" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -#: ../../source/ref-changelog.md:687 +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a new " -"`Open in Colab` button. No need to install anything on your local machine, " -"you can now use and learn about Flower in your browser, it's only a single " -"click away." +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -#: ../../source/ref-changelog.md:689 -msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), [#1472](https://github." 
-"com/adap/flower/pull/1472), [#1473](https://github.com/adap/flower/" -"pull/1473), [#1474](https://github.com/adap/flower/pull/1474), [#1475]" -"(https://github.com/adap/flower/pull/1475))" +#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/ref-changelog.md:691 +#: ../../source/tutorial-quickstart-huggingface.rst:113 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved (many " -"small changes and fixes)." +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -#: ../../source/ref-changelog.md:699 -msgid "Highlights" +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#, fuzzy +msgid "The Data" +msgstr "Metadata" + +#: ../../source/tutorial-quickstart-huggingface.rst:132 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +msgid "The Model" msgstr "" -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/tutorial-quickstart-huggingface.rst:180 +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" 
msgstr "" -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" +#: ../../source/tutorial-quickstart-huggingface.rst:193 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/tutorial-quickstart-huggingface.rst:196 msgid "" -"Tons of small API cleanups resulting in a more coherent developer experience" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#, fuzzy +msgid "The ClientApp" +msgstr "클라이언트앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub Contributors](https://github." -"com/adap/flower/graphs/contributors) order):" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/ref-changelog.md:710 +#: ../../source/tutorial-quickstart-huggingface.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:245 msgid "" -"[@rtaiello](https://github.com/rtaiello), [@g-pichler](https://github.com/g-" -"pichler), [@rob-luke](https://github.com/rob-luke), [@andreea-zaharia]" -"(https://github.com/andreea-zaharia), [@kinshukdua](https://github.com/" -"kinshukdua), [@nfnt](https://github.com/nfnt), [@tatiana-s](https://github." -"com/tatiana-s), [@TParcollet](https://github.com/TParcollet), [@vballoli]" -"(https://github.com/vballoli), [@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), [@hei411](https://github." -"com/hei411), [@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), [@Rubiel1](https://github." 
-"com/Rubiel1), [@FANTOME-PAN](https://github.com/FANTOME-PAN), [@Rono-BC]" -"(https://github.com/Rono-BC), [@lbhm](https://github.com/lbhm), [@sishtiaq]" -"(https://github.com/sishtiaq), [@remde](https://github.com/remde), [@Jueun-" -"Park](https://github.com/Jueun-Park), [@architjen](https://github.com/" -"architjen), [@PratikGarai](https://github.com/PratikGarai), [@mrinaald]" -"(https://github.com/mrinaald), [@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), [@sancarlim](https://github." -"com/sancarlim), [@gubertoli](https://github.com/gubertoli), [@Vingt100]" -"(https://github.com/Vingt100), [@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), [@jafermarq](https://github.com/" -"jafermarq), [@sisco0](https://github.com/sisco0), [@akhilmathurs](https://" -"github.com/akhilmathurs), [@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), [@pedropgusmao]" -"(https://github.com/pedropgusmao), [@tanertopal](https://github.com/" -"tanertopal), [@danieljanes](https://github.com/danieljanes)." +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/ref-changelog.md:714 +#: ../../source/tutorial-quickstart-huggingface.rst:269 +#: ../../source/tutorial-quickstart-pytorch.rst:261 msgid "" -"**All arguments must be passed as keyword arguments** ([#1338](https://" -"github.com/adap/flower/pull/1338))" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/tutorial-quickstart-huggingface.rst:296 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not longer " -"supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword for " -"each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", client=FlowerClient())`)." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/tutorial-quickstart-huggingface.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:321 +#: ../../source/tutorial-quickstart-tensorflow.rst:245 +#, fuzzy +msgid "The ServerApp" +msgstr "Flower 서버앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:332 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` ([#1317](https://github.com/adap/flower/" -"pull/1317))" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/ref-changelog.md:720 +#: ../../source/tutorial-quickstart-huggingface.rst:371 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": 600.0}" -"`, `start_server` and `start_simulation` now expect a configuration object " -"of type `flwr.server.ServerConfig`. `ServerConfig` takes the same arguments " -"that as the previous config dict, but it makes writing type-safe code easier " -"and the default parameters values more transparent." +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -#: ../../source/ref-changelog.md:722 +#: ../../source/tutorial-quickstart-huggingface.rst:376 msgid "" -"**Rename built-in strategy parameters for clarity** ([#1334](https://github." -"com/adap/flower/pull/1334))" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:724 +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: ../../source/tutorial-quickstart-ios.rst:5 +msgid "Quickstart iOS" msgstr "" -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: ../../source/tutorial-quickstart-ios.rst:10 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` --> `evaluate_fn`" +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/tutorial-quickstart-ios.rst:15 msgid "" -"**Update default arguments of built-in strategies** ([#1278](https://github." 
-"com/adap/flower/pull/1278))" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" -#: ../../source/ref-changelog.md:732 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently available " -"clients for training and evaluation. Projects that relied on the previous " -"default values can get the previous behaviour by initializing the strategy " -"in the following way:" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/tutorial-quickstart-ios.rst:21 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -#: ../../source/ref-changelog.md:736 -msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` ([#1334](https://github." -"com/adap/flower/pull/1334))" +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" msgstr "" -#: ../../source/ref-changelog.md:738 -msgid "" -"The `Strategy` method `evaluate` now receives the current round of federated " -"learning/evaluation as the first parameter." +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" msgstr "" -#: ../../source/ref-changelog.md:740 +#: ../../source/tutorial-quickstart-ios.rst:36 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/ref-changelog.md:742 +#: ../../source/tutorial-quickstart-ios.rst:72 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), and " -"(3) a config dictionary (`config`)." +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." 
msgstr "" -#: ../../source/ref-changelog.md:744 -msgid "" -"**Rename** `rnd` **to** `server_round` ([#1321](https://github.com/adap/" -"flower/pull/1321))" +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" msgstr "" -#: ../../source/ref-changelog.md:746 +#: ../../source/tutorial-quickstart-ios.rst:83 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. To " -"improve reaability and avoid confusion with *random*, this parameter has " -"been renamed from `rnd` to `server_round`." +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." msgstr "" -#: ../../source/ref-changelog.md:748 +#: ../../source/tutorial-quickstart-ios.rst:99 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` ([#1273](https://github.com/" -"adap/flower/pull/1273))" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." msgstr "" -#: ../../source/ref-changelog.md:750 +#: ../../source/tutorial-quickstart-ios.rst:102 msgid "" -"The experimental package `flwr.dataset` was migrated to Flower Baselines." +"After we have all of the necessary information, let's create our Flower " +"client." msgstr "" -#: ../../source/ref-changelog.md:752 +#: ../../source/tutorial-quickstart-ios.rst:117 msgid "" -"**Remove experimental strategies** ([#1280](https://github.com/adap/flower/" -"pull/1280))" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." msgstr "" -#: ../../source/ref-changelog.md:754 +#: ../../source/tutorial-quickstart-ios.rst:124 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"That's it for the client. We only have to implement :code:`Client` or " +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." 
msgstr "" -#: ../../source/ref-changelog.md:756 -msgid "" -"**Rename** `Weights` **to** `NDArrays` ([#1258](https://github.com/adap/" -"flower/pull/1258), [#1259](https://github.com/adap/flower/pull/1259))" +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-scikitlearn.rst:167 +#: ../../source/tutorial-quickstart-xgboost.rst:341 +msgid "Flower Server" msgstr "" -#: ../../source/ref-changelog.md:758 +#: ../../source/tutorial-quickstart-ios.rst:131 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +":code:`server.py`, import Flower and start the server:" msgstr "" -#: ../../source/ref-changelog.md:760 -msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** `start_server` " -"([#1258](https://github.com/adap/flower/pull/1258), [#1259](https://github." -"com/adap/flower/pull/1259))" +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +msgid "Train the model, federated!" msgstr "" -#: ../../source/ref-changelog.md:762 +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-xgboost.rst:567 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been a " -"historic artefact, in this release it is finally gone for good." +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/ref-changelog.md:764 +#: ../../source/tutorial-quickstart-ios.rst:152 msgid "" -"**Make** `get_parameters` **configurable** ([#1242](https://github.com/adap/" -"flower/pull/1242))" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." msgstr "" -#: ../../source/ref-changelog.md:766 +#: ../../source/tutorial-quickstart-ios.rst:156 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." msgstr "" -#: ../../source/ref-changelog.md:768 +#: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/ref-changelog.md:770 -msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the consistency " -"between `start_simulation` and `start_server` and makes transitioning " -"between the two easier." 
+#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" msgstr "" -#: ../../source/ref-changelog.md:774 +#: ../../source/tutorial-quickstart-jax.rst:10 msgid "" -"**Support Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -msgstr "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" +"이 튜토리얼에서는 Flower를 사용하여 기존 JAX 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. JAX를 사용해 " +"scikit-learn 데이터 세트에서 선형 회귀 모델을 훈련하고 있습니다. 예제는 '파이토치 - Centralized에서 " +"Federated으로 `_ 워크스루와 유사하게 구성하겠습니다. 먼저, `JAX를 사용한 선형 회귀 " +"`_" +" 튜토리얼`을 기반으로 centralized 학습 접근 방식을 구축합니다. 그런 다음 centralized 트레이닝 코드를 기반으로" +" federated 방식으로 트레이닝을 실행합니다." -#: ../../source/ref-changelog.md:776 +#: ../../source/tutorial-quickstart-jax.rst:16 msgid "" -"The previous Flower release introduced experimental support for Python 3.10, " -"this release declares Python 3.10 support as stable." +"Before we start building our JAX example, we need install the packages " +":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" msgstr "" +"JAX 예제 빌드를 시작하기 전에 :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, " +":code:`flwr` 패키지를 설치해야 합니다:" -#: ../../source/ref-changelog.md:778 +#: ../../source/tutorial-quickstart-jax.rst:24 +msgid "Linear Regression with JAX" +msgstr "JAX를 사용한 선형 회귀" + +#: ../../source/tutorial-quickstart-jax.rst:26 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** ([#1260]" -"(https://github.com/adap/flower/pull/1260), [#1277](https://github.com/adap/" -"flower/pull/1277))" +"We begin with a brief description of the centralized training code based " +"on a :code:`Linear Regression` model. If you want a more in-depth " +"explanation of what's going on then have a look at the official `JAX " +"documentation `_." msgstr "" +"먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙 집중식 훈련 코드에 대한 간략한 설명부터 시작하겠습니다. 더 자세한 설명을" +" 원하시면 공식 `JAX 문서 `_를 참조하세요." -#: ../../source/ref-changelog.md:780 +#: ../../source/tutorial-quickstart-jax.rst:29 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that " -"implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" -msgstr "" +"Let's create a new file called :code:`jax_training.py` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " +"be imported. In addition, we need to import :code:`sklearn` since we use " +":code:`make_regression` for the dataset and :code:`train_test_split` to " +"split the dataset into a training and test set. You can see that we do " +"not yet import the :code:`flwr` package for federated learning. This will" +" be done later." +msgstr "" +"전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 구성 요소가 포함된 :code:`jax_training.py`라는 새 파일을 " +"생성해 보겠습니다. 먼저, JAX 패키지인 :code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 
또한 데이터 세트에" +" :code:`make_regression`을 사용하고 데이터 세트를 학습 및 테스트 세트로 분할하기 위해 " +":code:`train_test_split`을 사용하므로 :code:`sklearn`을 가져와야 합니다. 연합 학습을 위해 아직 " +":code:`flwr` 패키지를 가져오지 않은 것을 볼 수 있습니다. 이 작업은 나중에 수행됩니다." -#: ../../source/ref-changelog.md:782 +#: ../../source/tutorial-quickstart-jax.rst:43 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` ([#1281]" -"(https://github.com/adap/flower/pull/1281))" -msgstr "" +"The :code:`load_data()` function loads the mentioned training and test " +"sets." +msgstr "code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." -#: ../../source/ref-changelog.md:784 +#: ../../source/tutorial-quickstart-jax.rst:53 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server` " -"instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the " -"Virtual Client Engine." -msgstr "" +"The model architecture (a very simple :code:`Linear Regression` model) is" +" defined in :code:`load_model()`." +msgstr "모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정의되어 있습니다." -#: ../../source/ref-changelog.md:786 +#: ../../source/tutorial-quickstart-jax.rst:65 msgid "" -"**Update code examples** ([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), [#1282](https://github." -"com/adap/flower/pull/1282))" +"We now need to define the training (function :code:`train()`), which " +"loops over the training set and measures the loss (function " +":code:`loss_fn()`) for each batch of training examples. The loss function" +" is separate since JAX takes derivatives with a :code:`grad()` function " +"(defined in the :code:`main()` function and called in :code:`train()`)." msgstr "" +"이제 훈련 집합을 반복하고 각 훈련 예제 배치에 대해 손실을 측정하는(함수 :code:`loss_fn()`) 훈련(함수 " +":code:`train()`)을 정의해야 합니다. JAX는 :code:`grad()` 함수(:code:`main()` 함수에 " +"정의되고 :code:`train()`에서 호출됨)로 파생물을 취하므로 손실 함수는 분리되어 있습니다." -#: ../../source/ref-changelog.md:788 +#: ../../source/tutorial-quickstart-jax.rst:83 msgid "" -"Many code examples received small or even large maintenance updates, among " -"them are" +"The evaluation of the model is defined in the function " +":code:`evaluation()`. The function takes all test examples and measures " +"the loss of the linear regression model." msgstr "" +"모델의 평가는 :code:`evaluation()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 예제를 가져와 선형 회귀 " +"모델의 손실을 측정합니다." -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" +#: ../../source/tutorial-quickstart-jax.rst:94 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the :code:`jax.grad()` function is defined in " +":code:`main()` and passed to :code:`train()`." msgstr "" +"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으므로 이제 모든 것을 종합하여 JAX를 사용 모델을 훈련할 수 있습니다. 이미" +" 언급했듯이 :code:`jax.grad()` 함수는 :code:`main()`에 정의되어 :code:`train()`에 " +"전달됩니다." -#: ../../source/ref-changelog.md:791 -msgid "`simulation_pytorch`" -msgstr "" +#: ../../source/tutorial-quickstart-jax.rst:111 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" +#: ../../source/tutorial-quickstart-jax.rst:117 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. 
" +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." msgstr "" +"지금까지는 JAX를 사용해 본 적이 있다면 이 모든 것이 상당히 익숙해 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 " +"사용하여 하나의 서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" -msgstr "" +#: ../../source/tutorial-quickstart-jax.rst:121 +msgid "JAX meets Flower" +msgstr "JAX와 Flower의 만남" -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" +#: ../../source/tutorial-quickstart-jax.rst:123 +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +":code:`jax_training.py` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server*, which averages all received " +"parameter updates. This describes one round of the federated learning " +"process, and we repeat this for multiple rounds." msgstr "" +"기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 쉽습니다. 서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 " +":code:`jax_training.py`의 코드를 사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. " +"클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된 파라미터는 *서버*로 다시 전송되며, 수신된 모든 파라미터 " +"업데이트의 평균을 구합니다. 이는 연합 학습 프로세스의 한 라운드를 설명하며, 이 과정을 여러 라운드에 걸쳐 반복합니다." -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" +#: ../../source/tutorial-quickstart-jax.rst:145 +msgid "" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined JAX training in :code:`jax_training.py`. Our" +" *client* needs to import :code:`flwr`, but also :code:`jax` and " +":code:`jaxlib` to update the parameters on our JAX model:" msgstr "" +"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`jax_training.py`에서 " +"이전에 정의한 JAX 교육을 기반으로 빌드합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, JAX 모델의 파라미터를" +" 업데이트하기 위해 :code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" -#: ../../source/ref-changelog.md:797 +#: ../../source/tutorial-quickstart-jax.rst:160 msgid "" -"**Remove the obsolete simulation example** ([#1328](https://github.com/adap/" -"flower/pull/1328))" -msgstr "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " +"easier to implement than :code:`Client` if you use a framework with good " +"NumPy interoperability (like JAX) because it avoids some of the " +"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" +" to implement four methods, two methods for getting/setting model " +"parameters, one method for training the model, and one method for testing" +" the model:" +msgstr "" +"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " +":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 구현은 " +":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`FlowerClient`라고 부를 " +"것입니다. :code:`NumPyClient`는 필요한 일부 보일러플레이를 피할 수 있기 때문에 NumPy 상호 운용성이 좋은 " +"프레임워크(예: JAX)를 사용하는 경우 :code:`Client`보다 구현하기가 약간 더 쉽습니다. 
" +"code:`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 " +"테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 합니다:" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid ":code:`set_parameters (optional)`" +msgstr ":code:`set_parameters (선택사항)`" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid "transform parameters to NumPy :code:`ndarray`'s" +msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" + +#: ../../source/tutorial-quickstart-jax.rst:174 +msgid "get the updated local model parameters and return them to the server" +msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" -#: ../../source/ref-changelog.md:799 +#: ../../source/tutorial-quickstart-jax.rst:178 +msgid "return the local loss to the server" +msgstr "로컬 손실을 서버로 반환합니다" + +#: ../../source/tutorial-quickstart-jax.rst:180 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"The challenging part is to transform the JAX model parameters from " +":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" +" `NumPyClient`." msgstr "" +"어려운 부분은 JAX 모델 매개변수를 :code:`DeviceArray`에서 :code:`NumPy ndarray`로 변환하여 " +"`NumPyClient`와 호환되도록 하는 것입니다." -#: ../../source/ref-changelog.md:801 +#: ../../source/tutorial-quickstart-jax.rst:182 msgid "" -"**Update documentation** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), [#1251](https://github." -"com/adap/flower/pull/1251), [#1257](https://github.com/adap/flower/" -"pull/1257), [#1267](https://github.com/adap/flower/pull/1267), [#1268]" -"(https://github.com/adap/flower/pull/1268), [#1300](https://github.com/adap/" -"flower/pull/1300), [#1304](https://github.com/adap/flower/pull/1304), [#1305]" -"(https://github.com/adap/flower/pull/1305), [#1307](https://github.com/adap/" -"flower/pull/1307))" +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`evaluate()` previously " +"defined in :code:`jax_training.py`. So what we really do here is we tell " +"Flower through our :code:`NumPyClient` subclass which of our already " +"defined functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." msgstr "" +"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " +":code:`jax_training.py`에 정의된 함수 :code:`train()`과 :code:`evaluate()`를 " +"사용합니다. 따라서 여기서 우리가 실제로 하는 일은 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " +":code:`NumPyClient` 서브클래스를 통해 Flower에게 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 " +"있도록 유형 type annotation을 포함했습니다." + +#: ../../source/tutorial-quickstart-jax.rst:251 +msgid "Having defined the federation process, we can run it." +msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." -#: ../../source/ref-changelog.md:803 +#: ../../source/tutorial-quickstart-jax.rst:280 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the `flwr." -"common` module in the API reference, includes support for markdown-based " -"documentation, migrates the changelog from `.rst` to `.md`, and fixes a " -"number of smaller details!" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" 
msgstr "" +"를 입력하고(그 전에 서버가 계속 실행 중인지 확인하세요) 두 클라이언트에서 연합 학습을 실행하는 JAX 프로젝트를 확인합니다. " +"축하합니다!" -#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" +#: ../../source/tutorial-quickstart-jax.rst:285 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." msgstr "" +"이 예제의 소스 코드는 시간이 지남에 따라 개선되었으며 여기에서 확인할 수 있습니다: 'Quickstart JAX " +"`_. 두 " +"클라이언트가 동일한 데이터 세트를 로드하기 때문에 이 예제는 다소 단순화되어 있습니다." -#: ../../source/ref-changelog.md:807 +#: ../../source/tutorial-quickstart-jax.rst:288 msgid "" -"Add round number to fit and evaluate log messages ([#1266](https://github." -"com/adap/flower/pull/1266))" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" msgstr "" +"이제 이 주제를 더 자세히 살펴볼 준비가 되었습니다. 더 정교한 모델을 사용하거나 다른 데이터 집합을 사용해 보는 것은 어떨까요? " +"클라이언트를 더 추가하는 것은 어떨까요?" + +#: ../../source/tutorial-quickstart-mlx.rst:5 +#, fuzzy +msgid "Quickstart MLX" +msgstr "빠른 시작" -#: ../../source/ref-changelog.md:808 +#: ../../source/tutorial-quickstart-mlx.rst:7 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example ([#847]" -"(https://github.com/adap/flower/pull/847))" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:809 +#: ../../source/tutorial-quickstart-mlx.rst:12 msgid "" -"Update developer tooling ([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), [#1301](https://github." -"com/adap/flower/pull/1301), [#1310](https://github.com/adap/flower/" -"pull/1310))" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:810 +#: ../../source/tutorial-quickstart-mlx.rst:27 msgid "" -"Rename ProtoBuf messages to improve consistency ([#1214](https://github.com/" -"adap/flower/pull/1214), [#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" +#: ../../source/tutorial-quickstart-mlx.rst:57 +msgid "To run the project, do:" msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/tutorial-quickstart-mlx.rst:106 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** ([#919](https://" -"github.com/adap/flower/pull/919), [#1127](https://github.com/adap/flower/" -"pull/1127), [#914](https://github.com/adap/flower/pull/914))" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/ref-changelog.md:818 +#: ../../source/tutorial-quickstart-mlx.rst:122 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how to " -"use [Flower Baselines](https://flower.ai/docs/using-baselines.html). With " -"this first preview release we're also inviting the community to [contribute " -"their own baselines](https://flower.ai/docs/baselines/how-to-contribute-" -"baselines.html)." +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" msgstr "" -#: ../../source/ref-changelog.md:820 msgid "" -"**C++ client SDK (preview) and code example** ([#1111](https://github.com/" -"adap/flower/pull/1111))" +#: ../../source/tutorial-quickstart-mlx.rst:166 msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" msgstr "" -#: ../../source/ref-changelog.md:822 msgid "" -"Preview support for Flower clients written in C++. The C++ preview includes " -"a Flower client SDK and a quickstart code example that demonstrates a simple " -"C++ client using the SDK." +#: ../../source/tutorial-quickstart-mlx.rst:190 msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/tutorial-quickstart-mlx.rst:212 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** ([#1135]" -"(https://github.com/adap/flower/pull/1135))" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." msgstr "" -#: ../../source/ref-changelog.md:826 -msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due to " -"be released in October. This Flower release adds experimental support for " -"both Python versions."
+#: ../../source/tutorial-quickstart-mlx.rst:218 +msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/ref-changelog.md:828 +#: ../../source/tutorial-quickstart-mlx.rst:231 msgid "" -"**Aggregate custom metrics through user-provided functions** ([#1144]" -"(https://github.com/adap/flower/pull/1144))" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/tutorial-quickstart-mlx.rst:240 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to " -"customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/ref-changelog.md:832 +#: ../../source/tutorial-quickstart-mlx.rst:255 msgid "" -"**User-configurable round timeout** ([#1162](https://github.com/adap/flower/" -"pull/1162))" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" msgstr "" -#: ../../source/ref-changelog.md:834 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary contains a " -"`round_timeout` key (with a `float` value in seconds), the server will wait " -"*at least* `round_timeout` seconds before it closes the connection." +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." msgstr "" -#: ../../source/ref-changelog.md:836 -msgid "" -"**Enable both federated evaluation and centralized evaluation to be used at " -"the same time in all built-in strategies** ([#1091](https://github.com/adap/" -"flower/pull/1091))" +#: ../../source/tutorial-quickstart-mlx.rst:275 +msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/ref-changelog.md:838 +#: ../../source/tutorial-quickstart-mlx.rst:285 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., client-" -"side) and centralized evaluation (i.e., server-side) in the same round. " -"Federated evaluation can be disabled by setting `fraction_eval` to `0.0`." +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." 
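A rough sketch of the ``get_params()``/``set_params()`` conversion described above, assuming ``model.parameters()`` exposes a ``"layers"`` list of per-layer dicts with ``weight`` and ``bias`` entries (as the msgids state); the exact layout depends on how the MLP is defined:

    # Sketch of the MLX parameter conversion described above (not the exact example code).
    # Assumption: model.parameters()["layers"] is a list of dicts with "weight"/"bias" keys.
    import mlx.core as mx
    import numpy as np
    from mlx.utils import tree_unflatten


    def get_params(model):
        # MLX arrays -> flat list of NumPy ndarrays (weight, bias, weight, bias, ...)
        layers = model.parameters()["layers"]
        return [np.array(layer[key]) for layer in layers for key in ("weight", "bias")]


    def set_params(model, parameters):
        # flat list of NumPy ndarrays -> MLX arrays assigned back to each layer
        new_params = {}
        for i, _ in enumerate(model.parameters()["layers"]):
            new_params[f"layers.{i}.weight"] = mx.array(parameters[2 * i])
            new_params[f"layers.{i}.bias"] = mx.array(parameters[2 * i + 1])
        model.update(tree_unflatten(list(new_params.items())))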
msgstr "" -#: ../../source/ref-changelog.md:840 -msgid "" -"**Two new Jupyter Notebook tutorials** ([#1141](https://github.com/adap/" -"flower/pull/1141))" +#: ../../source/tutorial-quickstart-mlx.rst:290 +msgid "Putting everything together we have:" msgstr "" -#: ../../source/ref-changelog.md:842 +#: ../../source/tutorial-quickstart-mlx.rst:344 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain basic " -"and intermediate Flower features:" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." msgstr "" -#: ../../source/ref-changelog.md:844 +#: ../../source/tutorial-quickstart-mlx.rst:378 msgid "" -"*An Introduction to Federated Learning*: [Open in Colab](https://colab." -"research.google.com/github/adap/flower/blob/main/tutorials/Flower-1-Intro-to-" -"FL-PyTorch.ipynb)" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." msgstr "" -#: ../../source/ref-changelog.md:846 +#: ../../source/tutorial-quickstart-mlx.rst:402 +#: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 msgid "" -"*Using Strategies in Federated Learning*: [Open in Colab](https://colab." -"research.google.com/github/adap/flower/blob/main/tutorials/Flower-2-" -"Strategies-in-FL-PyTorch.ipynb)" +"Congratulations! You've successfully built and run your first federated " +"learning system." msgstr "" -#: ../../source/ref-changelog.md:848 +#: ../../source/tutorial-quickstart-mlx.rst:407 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** ([#1076]" -"(https://github.com/adap/flower/pull/1076))" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:850 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/ref-changelog.md:852 -msgid "" -"**New advanced PyTorch code example** ([#1007](https://github.com/adap/" -"flower/pull/1007))" +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" msgstr "" -#: ../../source/ref-changelog.md:854 -msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/ref-changelog.md:856 +#: ../../source/tutorial-quickstart-pandas.rst:12 msgid "" -"**New JAX code example** ([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"Please refer to the `full code example " +"`_ " +"to learn more." 
msgstr "" -#: ../../source/ref-changelog.md:858 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/ref-changelog.md:862 +#: ../../source/tutorial-quickstart-pytorch.rst:7 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/tutorial-quickstart-pytorch.rst:12 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:864 +#: ../../source/tutorial-quickstart-pytorch.rst:27 msgid "" -"New documentation for [implementing strategies](https://flower.ai/docs/" -"framework/how-to-implement-strategies.html) ([#1097](https://github.com/adap/" -"flower/pull/1097), [#1175](https://github.com/adap/flower/pull/1175))" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:865 +#: ../../source/tutorial-quickstart-pytorch.rst:121 msgid "" -"New mobile-friendly documentation theme ([#1174](https://github.com/adap/" -"flower/pull/1174))" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." msgstr "" -#: ../../source/ref-changelog.md:866 +#: ../../source/tutorial-quickstart-pytorch.rst:159 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) ([#1205](https://github.com/adap/" -"flower/pull/1205))" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/ref-changelog.md:870 +#: ../../source/tutorial-quickstart-pytorch.rst:184 msgid "" -"**Remove deprecated support for Python 3.6** ([#871](https://github.com/adap/" -"flower/pull/871))" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. 
" +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" msgstr "" -#: ../../source/ref-changelog.md:871 +#: ../../source/tutorial-quickstart-pytorch.rst:236 msgid "" -"**Remove deprecated KerasClient** ([#857](https://github.com/adap/flower/" -"pull/857))" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/ref-changelog.md:872 +#: ../../source/tutorial-quickstart-pytorch.rst:294 msgid "" -"**Remove deprecated no-op extra installs** ([#973](https://github.com/adap/" -"flower/pull/973))" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/ref-changelog.md:873 +#: ../../source/tutorial-quickstart-pytorch.rst:323 msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/ref-changelog.md:874 +#: ../../source/tutorial-quickstart-pytorch.rst:365 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** ([#1107]" -"(https://github.com/adap/flower/pull/1107))" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:875 -msgid "" -"**Remove deprecated DefaultStrategy strategy** ([#1142](https://github.com/" -"adap/flower/pull/1142))" -msgstr "" +#: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 +#, fuzzy +msgid "Video tutorial" +msgstr "튜토리얼" -#: ../../source/ref-changelog.md:876 +#: ../../source/tutorial-quickstart-pytorch.rst:376 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** ([#1142]" -"(https://github.com/adap/flower/pull/1142))" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. 
A new video tutorial will be released " +"that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/ref-changelog.md:877 -msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" msgstr "" -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:883 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), [#872]" -"(https://github.com/adap/flower/pull/872), [#833](https://github.com/adap/" -"flower/pull/833), [#1036](https://github.com/adap/flower/pull/1036))" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/ref-changelog.md:885 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) now " -"work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" msgstr "" -#: ../../source/ref-changelog.md:887 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 msgid "" -"**New Jupyter Notebook code example** ([#833](https://github.com/adap/flower/" -"pull/833))" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" msgstr "" -#: ../../source/ref-changelog.md:889 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower simulations " -"using the Virtual Client Engine through Jupyter Notebook (incl. Google " -"Colab)." +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:891 +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"**Client properties (feature preview)** ([#795](https://github.com/adap/" -"flower/pull/795))" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/ref-changelog.md:893 -msgid "" -"Clients can implement a new method `get_properties` to enable server-side " -"strategies to query client properties." 
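For reference, a minimal sketch of the two helper functions the PyTorch quickstart strings above describe, assuming a standard ``torch.nn.Module``; the exact code generated by ``flwr new`` may differ slightly.

.. code-block:: python

    from collections import OrderedDict

    import torch


    def get_weights(net):
        # Extract the model parameters and represent them as a list of NumPy arrays
        return [val.cpu().numpy() for _, val in net.state_dict().items()]


    def set_weights(net, parameters):
        # Apply a list of NumPy arrays to an existing PyTorch model
        params_dict = zip(net.state_dict().keys(), parameters)
        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
        net.load_state_dict(state_dict, strict=True)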
+#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/ref-changelog.md:895 +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 msgid "" -"**Experimental Android support with TFLite** ([#865](https://github.com/adap/" -"flower/pull/865))" +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." msgstr "" -#: ../../source/ref-changelog.md:897 +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has become a " -"lot easier." +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:899 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 msgid "" -"The example uses TFLite on the client side, along with a new `FedAvgAndroid` " -"strategy. The Android client and `FedAvgAndroid` are still experimental, but " -"they are a first step towards a fully-fledged Android SDK and a unified " -"`FedAvg` implementation that integrated the new functionality from " -"`FedAvgAndroid`." +"Our example consists of one *server* and two *clients* all having the " +"same model." msgstr "" -#: ../../source/ref-changelog.md:901 +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default keepalive " -"time** ([#1069](https://github.com/adap/flower/pull/1069))" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." msgstr "" -#: ../../source/ref-changelog.md:903 +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, Microsoft " -"Azure). Users can configure the keepalive time to customize the gRPC stack " -"based on specific requirements." +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" msgstr "" -#: ../../source/ref-changelog.md:905 -msgid "" -"**New differential privacy example using Opacus and PyTorch** ([#805]" -"(https://github.com/adap/flower/pull/805))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "" -#: ../../source/ref-changelog.md:907 -msgid "" -"A new code example (`opacus`) demonstrates differentially-private federated " -"learning with Opacus, PyTorch, and Flower." +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" msgstr "" -#: ../../source/ref-changelog.md:909 +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 msgid "" -"**New Hugging Face Transformers code example** ([#863](https://github.com/" -"adap/flower/pull/863))" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. 
However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" msgstr "" -#: ../../source/ref-changelog.md:911 -msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of Hugging " -"Face Transformers with Flower." +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" msgstr "" -#: ../../source/ref-changelog.md:913 -msgid "" -"**New MLCube code example** ([#779](https://github.com/adap/flower/" -"pull/779), [#1034](https://github.com/adap/flower/pull/1034), [#1065]" -"(https://github.com/adap/flower/pull/1065), [#1090](https://github.com/adap/" -"flower/pull/1090))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" msgstr "" -#: ../../source/ref-changelog.md:915 -msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube with " -"Flower." +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" msgstr "" -#: ../../source/ref-changelog.md:917 -msgid "" -"**SSL-enabled server and client** ([#842](https://github.com/adap/flower/" -"pull/842), [#844](https://github.com/adap/flower/pull/844), [#845](https://" -"github.com/adap/flower/pull/845), [#847](https://github.com/adap/flower/" -"pull/847), [#993](https://github.com/adap/flower/pull/993), [#994](https://" -"github.com/adap/flower/pull/994))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" msgstr "" -#: ../../source/ref-changelog.md:919 -msgid "" -"SSL enables secure encrypted connections between clients and servers. This " -"release open-sources the Flower secure gRPC implementation to make encrypted " -"communication channels accessible to all Flower users." +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid ":code:`set_initial_params()`" msgstr "" -#: ../../source/ref-changelog.md:921 -msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** ([#885](https://" -"github.com/adap/flower/pull/885), [#895](https://github.com/adap/flower/" -"pull/895))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" msgstr "" -#: ../../source/ref-changelog.md:923 +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive Federated " -"Optimization paper." +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" msgstr "" -#: ../../source/ref-changelog.md:925 +#: ../../source/tutorial-quickstart-scikitlearn.rst:67 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** ([#860]" -"(https://github.com/adap/flower/pull/860))" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. 
The " +":code:`FederatedDataset.load_partition()` method loads the partitioned " +"training set for each partition ID defined in the :code:`--partition-id` " +"argument." msgstr "" -#: ../../source/ref-changelog.md:927 +#: ../../source/tutorial-quickstart-scikitlearn.rst:95 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." msgstr "" -#: ../../source/ref-changelog.md:931 +#: ../../source/tutorial-quickstart-scikitlearn.rst:107 msgid "" -"Update `num_examples` calculation in PyTorch code examples in ([#909]" -"(https://github.com/adap/flower/pull/909))" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." msgstr "" -#: ../../source/ref-changelog.md:932 +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 msgid "" -"Expose Flower version through `flwr.__version__` ([#952](https://github.com/" -"adap/flower/pull/952))" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" msgstr "" -#: ../../source/ref-changelog.md:933 -msgid "" -"`start_server` in `app.py` now returns a `History` object containing metrics " -"from training ([#974](https://github.com/adap/flower/pull/974))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/ref-changelog.md:934 -msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable ([#978]" -"(https://github.com/adap/flower/pull/978))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid ":code:`set_parameters` (optional)" msgstr "" -#: ../../source/ref-changelog.md:935 +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 msgid "" -"Increase sleep time after server start to three seconds in all code examples " -"([#1086](https://github.com/adap/flower/pull/1086))" +"update the local model weights with the parameters received from the " +"server" msgstr "" -#: ../../source/ref-changelog.md:936 -msgid "" -"Added a new FAQ section to the documentation ([#948](https://github.com/adap/" -"flower/pull/948))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:122 +msgid "is directly imported with :code:`utils.set_model_params()`" msgstr "" -#: ../../source/ref-changelog.md:937 -msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +msgid "set the local model weights" msgstr "" -#: ../../source/ref-changelog.md:941 -msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid "train the local model" msgstr "" -#: ../../source/ref-changelog.md:943 -msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in an " -"upcoming release." +#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +#, fuzzy +msgid "return the updated local model weights" +msgstr "현재 로컬 모델 파라미터를 반환합니다." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid "test the local model" msgstr "" -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "The methods can be implemented in the following way:" msgstr "" -#: ../../source/ref-changelog.md:949 +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 msgid "" -"**Experimental virtual client engine** ([#781](https://github.com/adap/" -"flower/pull/781) [#790](https://github.com/adap/flower/pull/790) [#791]" -"(https://github.com/adap/flower/pull/791))" +"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" msgstr "" -#: ../../source/ref-changelog.md:951 +#: ../../source/tutorial-quickstart-scikitlearn.rst:160 msgid "" -"One of Flower's goals is to enable research at scale. This release enables a " -"first (experimental) peek at a major new feature, codenamed the virtual " -"client engine. Virtual clients enable simulations that scale to a (very) " -"large number of clients on a single machine or compute cluster. The easiest " -"way to test the new functionality is to look at the two new code examples " -"called `quickstart_simulation` and `simulation_pytorch`." +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." msgstr "" -#: ../../source/ref-changelog.md:953 +#: ../../source/tutorial-quickstart-scikitlearn.rst:169 msgid "" -"The feature is still experimental, so there's no stability guarantee for the " -"API. It's also not quite ready for prime time and comes with a few known " -"caveats. However, those who are curious are encouraged to try it out and " -"share their thoughts." +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." 
msgstr "" -#: ../../source/ref-changelog.md:955 -msgid "" -"**New built-in strategies** ([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:172 +msgid ":code:`server.py`, import Flower and start the server:" msgstr "" -#: ../../source/ref-changelog.md:957 +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy. Note that we also make use of Flower" +" Datasets here to load the test split of the MNIST dataset for server-" +"side evaluation." msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/tutorial-quickstart-scikitlearn.rst:213 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." msgstr "" -#: ../../source/ref-changelog.md:960 +#: ../../source/tutorial-quickstart-scikitlearn.rst:232 msgid "" -"**New PyTorch Lightning code example** ([#617](https://github.com/adap/" -"flower/pull/617))" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" msgstr "" -#: ../../source/ref-changelog.md:962 +#: ../../source/tutorial-quickstart-scikitlearn.rst:239 +#: ../../source/tutorial-quickstart-xgboost.rst:575 msgid "" -"**New Variational Auto-Encoder code example** ([#752](https://github.com/" -"adap/flower/pull/752))" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" msgstr "" -#: ../../source/ref-changelog.md:964 -msgid "" -"**New scikit-learn code example** ([#748](https://github.com/adap/flower/" -"pull/748))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:246 +#: ../../source/tutorial-quickstart-xgboost.rst:582 +msgid "Open another terminal and start the second client:" msgstr "" -#: ../../source/ref-changelog.md:966 +#: ../../source/tutorial-quickstart-scikitlearn.rst:252 +#: ../../source/tutorial-quickstart-xgboost.rst:588 msgid "" -"**New experimental TensorBoard strategy** ([#789](https://github.com/adap/" -"flower/pull/789))" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" msgstr "" -#: ../../source/ref-changelog.md:970 +#: ../../source/tutorial-quickstart-scikitlearn.rst:286 msgid "" -"Improved advanced TensorFlow code example ([#769](https://github.com/adap/" -"flower/pull/769))" +"Congratulations! 
You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." msgstr "" -#: ../../source/ref-changelog.md:971 +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" -"Warning when `min_available_clients` is misconfigured ([#830](https://github." -"com/adap/flower/pull/830))" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -#: ../../source/ref-changelog.md:972 -msgid "" -"Improved gRPC server docs ([#841](https://github.com/adap/flower/pull/841))" +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/ref-changelog.md:973 +#: ../../source/tutorial-quickstart-tensorflow.rst:7 msgid "" -"Improved error message in `NumPyClient` ([#851](https://github.com/adap/" -"flower/pull/851))" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/tutorial-quickstart-tensorflow.rst:13 msgid "" -"Improved PyTorch quickstart code example ([#852](https://github.com/adap/" -"flower/pull/852))" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:978 +#: ../../source/tutorial-quickstart-tensorflow.rst:28 msgid "" -"**Disabled final distributed evaluation** ([#800](https://github.com/adap/" -"flower/pull/800))" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/tutorial-quickstart-tensorflow.rst:118 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on " -"all connected clients, which is often not required (e.g., when using server-" -"side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -#: ../../source/ref-changelog.md:982 +#: ../../source/tutorial-quickstart-tensorflow.rst:147 msgid "" -"**Renamed q-FedAvg strategy** ([#802](https://github.com/adap/flower/" -"pull/802))" +"Next, we need a model. 
We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -#: ../../source/ref-changelog.md:984 +#: ../../source/tutorial-quickstart-tensorflow.rst:178 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect the " -"notation given in the original paper (q-FFL is the optimization objective, q-" -"FedAvg is the proposed solver). Note the original (now deprecated) " -"`QffedAvg` class is still available for compatibility reasons (it will be " -"removed in a future release)." +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -#: ../../source/ref-changelog.md:986 +#: ../../source/tutorial-quickstart-tensorflow.rst:212 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` ([#791](https://github.com/adap/flower/pull/791))" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." msgstr "" -#: ../../source/ref-changelog.md:988 +#: ../../source/tutorial-quickstart-tensorflow.rst:247 msgid "" -"This example has been replaced by a new example. The new example is based on " -"the experimental virtual client engine, which will become the new default " -"way of doing most types of large-scale simulations in Flower. The existing " -"example was kept for reference purposes, but it might be removed in the " -"future." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/tutorial-quickstart-tensorflow.rst:284 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:994 +#: ../../source/tutorial-quickstart-tensorflow.rst:299 msgid "" -"**New built-in strategies** ([#549](https://github.com/adap/flower/pull/549))" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. 
A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." msgstr "" -#: ../../source/ref-changelog.md:999 -msgid "" -"**Custom metrics for server and strategies** ([#717](https://github.com/adap/" -"flower/pull/717))" +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" msgstr "" -#: ../../source/ref-changelog.md:1001 -msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +#: ../../source/tutorial-quickstart-xgboost.rst:14 +msgid "Federated XGBoost" msgstr "" -#: ../../source/ref-changelog.md:1003 +#: ../../source/tutorial-quickstart-xgboost.rst:16 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are " -"returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and they " -"enable evaluation functions passed to built-in strategies (via `eval_fn`) to " -"return more than two evaluation metrics. Strategies can even return " -"*aggregated* metrics dictionaries for the server to keep track of." +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/ref-changelog.md:1005 +#: ../../source/tutorial-quickstart-xgboost.rst:20 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate " -"from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." msgstr "" -#: ../../source/ref-changelog.md:1007 -msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +#: ../../source/tutorial-quickstart-xgboost.rst:23 +msgid "Why federated XGBoost?" msgstr "" -#: ../../source/ref-changelog.md:1009 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "" -"**Migration warnings for deprecated functionality** ([#690](https://github." -"com/adap/flower/pull/690))" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." msgstr "" -#: ../../source/ref-changelog.md:1011 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. 
This release introduces detailed " -"warning messages if usage of deprecated APIs is detected. The new warning " -"messages often provide details on how to migrate to more recent APIs, thus " -"easing the transition from one release to another." +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." msgstr "" -#: ../../source/ref-changelog.md:1013 +#: ../../source/tutorial-quickstart-xgboost.rst:30 msgid "" -"Improved docs and docstrings ([#691](https://github.com/adap/flower/" -"pull/691) [#692](https://github.com/adap/flower/pull/692) [#713](https://" -"github.com/adap/flower/pull/713))" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." msgstr "" -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" msgstr "" -#: ../../source/ref-changelog.md:1017 +#: ../../source/tutorial-quickstart-xgboost.rst:39 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) [#702](https://github.com/" -"adap/flower/pull/702) [#705](https://github.com/adap/flower/pull/705))" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:1021 +#: ../../source/tutorial-quickstart-xgboost.rst:41 msgid "" -"**Serialization-agnostic server** ([#721](https://github.com/adap/flower/" -"pull/721))" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" msgstr "" -#: ../../source/ref-changelog.md:1023 +#: ../../source/tutorial-quickstart-xgboost.rst:47 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of class " -"`Weights` (which represents parameters as deserialized NumPy ndarrays) was " -"replaced by class `Parameters` (e.g., in `Strategy`). `Parameters` objects " -"are fully serialization-agnostic and represents parameters as byte arrays, " -"the `tensor_type` attributes indicates how these byte arrays should be " -"interpreted (e.g., for serialization/deserialization)." +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" msgstr "" -#: ../../source/ref-changelog.md:1025 +#: ../../source/tutorial-quickstart-xgboost.rst:57 msgid "" -"Built-in strategies implement this approach by handling serialization and " -"deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR [#721](https://github.com/adap/" -"flower/pull/721) to see how strategies can easily migrate to the new format." +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. 
Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." msgstr "" -#: ../../source/ref-changelog.md:1027 +#: ../../source/tutorial-quickstart-xgboost.rst:60 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use `flwr.server.Server." -"evaluate_round` instead ([#717](https://github.com/adap/flower/pull/717))" -msgstr "" - -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" msgstr "" -#: ../../source/ref-changelog.md:1033 -msgid "" -"**Server-side parameter initialization** ([#658](https://github.com/adap/" -"flower/pull/658))" +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/ref-changelog.md:1035 +#: ../../source/tutorial-quickstart-xgboost.rst:89 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/ref-changelog.md:1037 +#: ../../source/tutorial-quickstart-xgboost.rst:102 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies will " -"provide these initial parameters to the server on startup and then delete " -"them to free the memory afterwards." +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=30)`). Then, we load " +"the partition for the given client based on :code:`partition_id`:" msgstr "" -#: ../../source/ref-changelog.md:1056 +#: ../../source/tutorial-quickstart-xgboost.rst:121 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." msgstr "" -#: ../../source/ref-changelog.md:1060 +#: ../../source/tutorial-quickstart-xgboost.rst:134 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to `flwr.server." -"strategy.FedAvg`, which is equivalent)" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" msgstr "" -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/ref-changelog.md:1066 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) [#572](https://github.com/" -"adap/flower/pull/572) [#633](https://github.com/adap/flower/pull/633))" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." 
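A minimal sketch of the hyper-parameter definition described above; the concrete values are illustrative and the quickstart's defaults may differ.

.. code-block:: python

    # Number of local boosting iterations performed by each client per FL round
    num_local_round = 1

    # XGBoost training parameters; switch "tree_method" to "gpu_hist" to train on GPU
    params = {
        "objective": "binary:logistic",
        "eta": 0.1,  # learning rate
        "max_depth": 8,
        "eval_metric": "auc",
        "nthread": 16,
        "num_parallel_tree": 1,
        "subsample": 1,
        "tree_method": "hist",
    }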
msgstr "" -#: ../../source/ref-changelog.md:1068 -msgid "" -"Clients can now return an additional dictionary mapping `str` keys to values " -"of the following types: `bool`, `bytes`, `float`, `int`, `str`. This means " -"one can return almost arbitrary values from `fit`/`evaluate` and make use of " -"them on the server side!" +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/ref-changelog.md:1070 +#: ../../source/tutorial-quickstart-xgboost.rst:183 msgid "" -"This improvement also allowed for more consistent return types between `fit` " -"and `evaluate`: `evaluate` should now return a tuple `(float, int, dict)` " -"representing the loss, number of examples, and a dictionary holding " -"arbitrary problem-specific values like accuracy." +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." msgstr "" -#: ../../source/ref-changelog.md:1072 +#: ../../source/tutorial-quickstart-xgboost.rst:205 msgid "" -"In case you wondered: this feature is compatible with existing projects, the " -"additional dictionary return value is optional. New code should however " -"migrate to the new return types to be compatible with upcoming Flower " -"releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, `evaluate`: " -"`float, int, Dict[str, Scalar]`). See the example below for details." +"All required parameters defined above are passed to :code:`XgbClient`'s " +"constructor." msgstr "" -#: ../../source/ref-changelog.md:1074 +#: ../../source/tutorial-quickstart-xgboost.rst:207 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." msgstr "" -#: ../../source/ref-changelog.md:1089 +#: ../../source/tutorial-quickstart-xgboost.rst:221 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** `Client." -"evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." msgstr "" -#: ../../source/ref-changelog.md:1091 +#: ../../source/tutorial-quickstart-xgboost.rst:262 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means that " -"dictionary values were expected to be strings. The new release generalizes " -"this to enable values of the following types: `bool`, `bytes`, `float`, " -"`int`, `str`." +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. From the second round, we load the global " +"model sent from server to new build Booster object, and then update model" +" weights on local training data with function :code:`local_boost` as " +"follows:" msgstr "" -#: ../../source/ref-changelog.md:1093 +#: ../../source/tutorial-quickstart-xgboost.rst:281 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-side " -"and `int(config[\"epochs\"])` on the client side!" 
+"Given :code:`num_local_round`, we update trees by calling " +":code:`bst_input.update` method. After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." msgstr "" -#: ../../source/ref-changelog.md:1095 +#: ../../source/tutorial-quickstart-xgboost.rst:313 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"In :code:`evaluate`, after loading the global model, we call " +":code:`bst.eval_set` function to conduct evaluation on valid set. The AUC" +" value will be returned." msgstr "" -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/tutorial-quickstart-xgboost.rst:316 +msgid "" +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" msgstr "" -#: ../../source/ref-changelog.md:1116 +#: ../../source/tutorial-quickstart-xgboost.rst:332 msgid "" -"New example: PyTorch From Centralized To Federated ([#549](https://github." -"com/adap/flower/pull/549))" +"That's it for the client. We only have to implement :code:`Client` and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." msgstr "" -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" +#: ../../source/tutorial-quickstart-xgboost.rst:343 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." msgstr "" -#: ../../source/ref-changelog.md:1118 +#: ../../source/tutorial-quickstart-xgboost.rst:346 msgid "" -"New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." msgstr "" -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/tutorial-quickstart-xgboost.rst:348 +msgid "We first define a strategy for XGBoost bagging aggregation." msgstr "" -#: ../../source/ref-changelog.md:1120 +#: ../../source/tutorial-quickstart-xgboost.rst:380 msgid "" -"Updated examples documentation ([#549](https://github.com/adap/flower/" -"pull/549))" +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients. The :code:`config_func` " +"function is to return the current FL round number to client's " +":code:`fit()` and :code:`evaluate()` methods." 
msgstr "" -#: ../../source/ref-changelog.md:1121 -msgid "" -"Removed obsolete documentation ([#548](https://github.com/adap/flower/" -"pull/548))" +#: ../../source/tutorial-quickstart-xgboost.rst:384 +msgid "Then, we start the server:" msgstr "" -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" +#: ../../source/tutorial-quickstart-xgboost.rst:396 +msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/ref-changelog.md:1125 +#: ../../source/tutorial-quickstart-xgboost.rst:398 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the " -"clients is now handled in `flwr.server.start_server` ([#553](https://github." -"com/adap/flower/pull/553) [#540](https://github.com/adap/flower/issues/540))." -msgstr "" - -#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." msgstr "" -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" +#: ../../source/tutorial-quickstart-xgboost.rst:400 +msgid "" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" msgstr "" -#: ../../source/ref-changelog.md:1131 +#: ../../source/tutorial-quickstart-xgboost.rst:496 msgid "" -"Added an example for embedded devices ([#507](https://github.com/adap/flower/" -"pull/507))" +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" msgstr "" -#: ../../source/ref-changelog.md:1132 +#: ../../source/tutorial-quickstart-xgboost.rst:555 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) ([#504]" -"(https://github.com/adap/flower/pull/504) [#508](https://github.com/adap/" -"flower/pull/508))" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." msgstr "" -#: ../../source/ref-changelog.md:1133 +#: ../../source/tutorial-quickstart-xgboost.rst:560 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into the " -"top-level `examples` directory ([#494](https://github.com/adap/flower/" -"pull/494) [#512](https://github.com/adap/flower/pull/512))" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." msgstr "" -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/tutorial-quickstart-xgboost.rst:565 +msgid "Launch Federated XGBoost!" msgstr "" -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" +#: ../../source/tutorial-quickstart-xgboost.rst:641 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." msgstr "" -#: ../../source/ref-changelog.md:1139 +#: ../../source/tutorial-quickstart-xgboost.rst:646 msgid "" -"Renamed strategy methods ([#486](https://github.com/adap/flower/pull/486)) " -"to unify the naming of Flower's public APIs. 
Other public methods/functions " -"(e.g., every method in `Client`, but also `Strategy.evaluate`) do not use " -"the `on_` prefix, which is why we're removing it from the four methods in " -"Strategy. To migrate rename the following `Strategy` methods accordingly:" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." msgstr "" -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/tutorial-quickstart-xgboost.rst:650 +msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/tutorial-quickstart-xgboost.rst:652 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/tutorial-quickstart-xgboost.rst:659 +msgid "Cyclic training" msgstr "" -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/tutorial-quickstart-xgboost.rst:661 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." msgstr "" -#: ../../source/ref-changelog.md:1147 +#: ../../source/tutorial-quickstart-xgboost.rst:665 msgid "" -"Deprecated `DefaultStrategy` ([#479](https://github.com/adap/flower/" -"pull/479)). To migrate use `FedAvg` instead." +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" msgstr "" -#: ../../source/ref-changelog.md:1148 +#: ../../source/tutorial-quickstart-xgboost.rst:705 msgid "" -"Simplified examples and baselines ([#484](https://github.com/adap/flower/" -"pull/484))." +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." msgstr "" -#: ../../source/ref-changelog.md:1149 +#: ../../source/tutorial-quickstart-xgboost.rst:746 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface ([#483]" -"(https://github.com/adap/flower/pull/483))." +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." msgstr "" -#: ../../source/ref-changelog.md:1150 +#: ../../source/tutorial-quickstart-xgboost.rst:749 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 ([#471](https://github." -"com/adap/flower/pull/471))." 
+"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/ref-changelog.md:1151 +#: ../../source/tutorial-quickstart-xgboost.rst:813 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:815 msgid "" -"Improved `Strategy` docstrings ([#470](https://github.com/adap/flower/" -"pull/470))." +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/tutorial-quickstart-xgboost.rst:846 +msgid "Customised centralised/distributed evaluation" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/tutorial-quickstart-xgboost.rst:848 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate how " -"Flower can be used to federate different kinds of existing machine learning " -"pipelines, usually leveraging popular machine learning frameworks such as " -"`PyTorch `_ or `TensorFlow `_." +"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" msgstr "" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/tutorial-quickstart-xgboost.rst:880 msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." msgstr "" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/tutorial-quickstart-xgboost.rst:883 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image classification " -"with MobileNetV2:" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." msgstr "" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/tutorial-quickstart-xgboost.rst:887 +msgid "Flower simulation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:888 msgid "" -"`Quickstart TensorFlow (Code) `_" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." msgstr "" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/tutorial-quickstart-xgboost.rst:922 msgid "" -":doc:`Quickstart TensorFlow (Tutorial) `" +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/tutorial-quickstart-xgboost.rst:977 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." 
msgstr "" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +#: ../../source/tutorial-quickstart-xgboost.rst:980 +msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/tutorial-quickstart-xgboost.rst:1031 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a " -"simple Convolutional Neural Network:" +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" msgstr "" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/tutorial-quickstart-xgboost.rst:1051 msgid "" -"`Quickstart PyTorch (Code) `_" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" msgstr "" -#: ../../source/ref-example-projects.rst:29 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +#: ../../source/tutorial-quickstart-xgboost.rst:1094 +msgid "Arguments parser" msgstr "" -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/tutorial-quickstart-xgboost.rst:1096 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. Let's first see the sever side:" msgstr "" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/tutorial-quickstart-xgboost.rst:1142 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." msgstr "" -#: ../../source/ref-example-projects.rst:37 -msgid "" -"`PyTorch: From Centralized To Federated (Code) `_" +#: ../../source/tutorial-quickstart-xgboost.rst:1146 +msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/tutorial-quickstart-xgboost.rst:1200 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." msgstr "" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +#: ../../source/tutorial-quickstart-xgboost.rst:1204 +msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/ref-example-projects.rst:44 -msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +#: ../../source/tutorial-quickstart-xgboost.rst:1282 +msgid "This integrates all arguments for both client and server sides." 
msgstr "" -#: ../../source/ref-example-projects.rst:46 -msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_" +#: ../../source/tutorial-quickstart-xgboost.rst:1285 +msgid "Example commands" msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/tutorial-quickstart-xgboost.rst:1287 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" msgstr "" -#: ../../source/ref-faq.rst:4 -msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +#: ../../source/tutorial-quickstart-xgboost.rst:1294 +msgid "Then, on each client terminal, we start the clients:" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/tutorial-quickstart-xgboost.rst:1300 +msgid "To run the same experiment with Flower simulation:" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/tutorial-quickstart-xgboost.rst:1306 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to " -"make it work even better on Colab. Here's a quickstart example:" +"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." msgstr "" -#: ../../source/ref-faq.rst:10 -msgid "" -"`Flower simulation PyTorch `_" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 msgid "" -"`Flower simulation TensorFlow/Keras `_" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." msgstr "" -#: ../../source/ref-faq.rst:15 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_ " -"and the corresponding `GitHub code example `_." +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." 
msgstr "" +"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" +" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " +"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." -#: ../../source/ref-faq.rst -msgid "" -":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 🌼" msgstr "" -#: ../../source/ref-faq.rst:19 -msgid "" -"Yes, it does. Please take a look at our `blog post `_ or " -"check out the code examples:" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 msgid "" -"`Android Kotlin example `_" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/ref-faq.rst:29 -msgid "" -"`Flower meets Nevermined YouTube video `_." 
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 msgid "" -"`Flower meets KOSMoS `_." +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." msgstr "" -#: ../../source/ref-faq.rst:31 -msgid "" -"`Flower meets Talan blog post `_ ." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 msgid "" -"`Flower meets Talan GitHub Repository `_ ." +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" msgstr "" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to make " -"well-informed decisions to improve Flower. Doing this enables the Flower " -"team to understand how Flower is used and what challenges users might face." +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." msgstr "" -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.** " -"Staying true to this statement, Flower makes it easy to disable telemetry " -"for users that do not want to share anonymous usage metrics." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" msgstr "" -#: ../../source/ref-telemetry.md:9 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 msgid "" -"We follow strong principles guarding anonymous usage metrics collection:" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. 
We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to learn " -"“[How to opt-out](#how-to-opt-out)”." +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" msgstr "" -#: ../../source/ref-telemetry.md:12 -msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not contain " -"any personally identifiable information (PII). See “[Collected metrics]" -"(#collected-metrics)” to understand what metrics are being reported." +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-to-" -"inspect-what-is-being-reported)”" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 msgid "" -"**Open for feedback:** You can always reach out to us if you have feedback; " -"see the section “[How to contact us](#how-to-contact-us)” for details." +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "" - -#: ../../source/ref-telemetry.md:18 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" 
msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example, `." -"bashrc` (or whatever configuration file applies to your environment) to " -"disable Flower telemetry permanently." +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." msgstr "" -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" msgstr "" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"**Flower version.** Understand which versions of Flower are currently being " -"used. This helps us to decide whether we should invest effort into releasing " -"a patch version for an older version of Flower or instead use the bandwidth " -"to build new features." +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" msgstr "" -#: ../../source/ref-telemetry.md:34 -msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 msgid "" -"**Hardware properties.** Understanding the hardware environment that Flower " -"is being used in helps to decide whether we should, for example, put more " -"effort into supporting low-resource environments." +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." 
msgstr "" -#: ../../source/ref-telemetry.md:38 -msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables us " -"to understand how heavily certain features are being used and better " -"prioritize based on that." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" msgstr "" -#: ../../source/ref-telemetry.md:40 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete them." +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in `~/.flwr/" -"source` the first time a telemetry event is generated. The source ID is " -"important to identify whether an issue is recurring or whether an issue is " -"triggered by multiple clusters running concurrently (which often happens in " -"simulation). For example, if a device runs multiple workloads at the same " -"time, and this results in an issue, then, in order to reproduce the issue, " -"multiple workloads must be started at the same time." +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." msgstr "" -#: ../../source/ref-telemetry.md:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 msgid "" -"You may delete the source ID at any time. If you wish for all events logged " -"under a specific source ID to be deleted, you can send a deletion request " -"mentioning the source ID to `telemetry@flower.ai`. All events related to " -"that source ID will then be permanently deleted." +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" msgstr "" -#: ../../source/ref-telemetry.md:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 msgid "" -"We will not collect any personally identifiable information. If you think " -"any of the metrics collected could be misused in any way, please [get in " -"touch with us](#how-to-contact-us). We will update this page to reflect any " -"changes to the metrics collected and publish changes in the changelog." +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"This works as expected, ten clients are training for three rounds of " +"federated learning." 
msgstr "" -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information by " -"setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging is " -"disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." msgstr "" -#: ../../source/ref-telemetry.md:58 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics, " -"use both environment variables:" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." msgstr "" -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android app " -"using Flower." +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 msgid "" -"Let's build a federated learning system using TFLite and Flower on Android!" +"First of all, it's more code. But why? 
The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 msgid "" -"Please refer to the `full code example `_ to learn more." +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"FastAI to train a vision model on CIFAR-10." +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 msgid "" -"Please refer to the `full code example `_ to learn more." +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower with " -"HuggingFace Transformers in order to fine-tune an LLM." +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 msgid "" -"Let's build a federated learning system using Hugging Face Transformers and " -"Flower!" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 msgid "" -"We will leverage Hugging Face to federate the training of language models " -"over multiple clients using Flower. More specifically, we will fine-tune a " -"pre-trained Transformer model (distilBERT) for sequence classification over " -"a dataset of IMDB ratings. The end goal is to detect if a movie rating is " -"positive or negative." +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, " -"and :code:`transformers`. This can be done using :code:`pip`:" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 msgid "" -"Once we have a way of creating our trainloader and testloader, we can take " -"care of the training and testing. This is very similar to any :code:" -"`PyTorch` training or testing loop:" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT " -"model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). This " -"is very easy, as our model is a standard :code:`PyTorch` model:" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. 
``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the server " -"to send its parameters to the client. Finally, the :code:`fit` function " -"trains the model locally for the client, and the :code:`evaluate` function " -"tests the model locally and returns the relevant metrics." +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 -msgid "" -"Now that we have a way to instantiate clients, we need to create our server " -"in order to aggregate the results. Using Flower, this can be done very " -"easily by first choosing a strategy (here, we are using :code:`FedAvg`, " -"which will define the global weights as the average of all the clients' " -"weights at each round) and then using the :code:`flwr.server.start_server` " -"function:" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +msgid "`Check out Flower Code Examples `__" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 msgid "" -"The :code:`weighted_average` function is there to provide a way to aggregate " -"the metrics distributed amongst the clients (basically this allows us to " -"display a nice average accuracy and loss for every round)." +"`Use Flower Baselines for your research " +"`__" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +msgid "" +"`Watch Flower AI Summit 2024 videos `__" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 -msgid "" -"And they will be able to connect to the server and start the federated " -"training." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 msgid "" -"If you want to check out everything put together, you should check out the " -"`full code example `_ ." +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 🌼" +msgstr "시작하기" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 msgid "" -"Of course, this is a very basic example, and a lot can be added or modified, " -"it was just to showcase how simply we could federate a Hugging Face workflow " -"using Flower." +"Before we begin with any actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "사전 릴리즈 설치" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very " -"well used :code:`TensorFlow`." +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +msgid "Load the data" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST using " -"Flower and CoreML on iOS devices." +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv " -"`. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. 
We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that " -"all have the same model." +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 msgid "" -"*Clients* are responsible for generating individual weight updates for the " -"model based on their local datasets. These updates are then sent to the " -"*server* which will aggregate them to produce a better model. Finally, the " -"*server* sends this improved version of the model back to each *client*. A " -"complete cycle of weight updates is called a *round*." +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You " -"can do this by using pip:" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and MNIST " -"as our dataset. For simplicity reasons we will use the complete Flower " -"client with CoreML, that has been implemented and stored inside the Swift " -"SDK. The client implementation can be seen below:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a " -"dependency in your project. For our application, we will store the logic of " -"our app in :code:`FLiOSModel.swift` and the UI elements in :code:" -"`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift` in this " -"quickstart. Please refer to the `full code example `_ to learn more about the app." +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +msgid "Define the model" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS device. " -"We need to pass the url to access mlmodel and run CoreML machine learning " -"processes, it can be retrieved by calling the function :code:`Bundle.main." -"url`. For the MNIST dataset, we need to preprocess it into :code:" -"`MLBatchProvider` object. The preprocessing is done inside :code:`DataLoader." -"swift`." +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 -msgid "" -"Since CoreML does not allow the model parameters to be seen before training, " -"and accessing the model parameters during or after the training can only be " -"done by specifying the layer name, we need to know this information " -"beforehand, through looking at the model specification, which are written as " -"proto files. The implementation can be seen in :code:`MLModelInspect`." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 -msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." -msgstr "" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "릴리즈 동안에" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 msgid "" -"Then start the Flower gRPC client and start communicating to the server by " -"passing our Flower client to the function :code:`startFlwrGRPC`." 
+"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 msgid "" -"That's it for the client. We only have to implement :code:`Client` or call " -"the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The " -"attribute :code:`hostname` and :code:`port` tells the client which server to " -"connect to. This can be done by entering the hostname and port in the " -"application before clicking the start button to start the federated learning " -"process." +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named :code:" -"`server.py`, import Flower and start the server:" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" -msgstr "" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "모델 매개변수." -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and multiple " -"clients. 
We therefore have to start the server first:" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" -"Once the server is running we can start the clients in different terminals. " -"Build and run the client through your Xcode, one through Xcode Simulator and " -"the other by deploying it to your iPhone. To see more about how to deploy " -"your app to iPhone or Simulator visit `here `_." +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code `_ for this example can be found in :" -"code:`examples/ios`." +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"Jax to train a linear regression model on a scikit-learn dataset." -msgstr "" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Flower 클라이언트 앱을 실행합니다." -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"Pandas to perform Federated Analytics." +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. 
To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 msgid "" -"Please refer to the `full code example `_ to learn more." +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"PyTorch to train a CNN model on MNIST." +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural Network " -"on CIFAR10 using Flower and PyTorch." +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 msgid "" -"Our example consists of one *server* and two *clients* all having the same " -"model." -msgstr "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Flower 서버앱" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 msgid "" -"*Clients* are responsible for generating individual weight-updates for the " -"model based on their local datasets. These updates are then sent to the " -"*server* which will aggregate them to produce a better model. Finally, the " -"*server* sends this improved version of the model back to each *client*. A " -"complete cycle of weight updates is called a *round*." +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. 
In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:29 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go ahead " -"and install PyTorch and the **torchvision** library:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +msgid "Run the training" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training procedure " -"and network architecture are based on PyTorch's `Deep Learning with PyTorch " -"`_." +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, python-format msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads the " -"training and test data that are then normalized." +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 msgid "" -"Define the loss and optimizer with PyTorch. 
The training of the dataset is " -"done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 msgid "" -"Define then the validation of the machine learning network. We loop over " -"the test set and measure the loss and accuracy of the test set." +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 msgid "" -"After defining the training and testing of a PyTorch machine learning model, " -"we use the functions for the Flower clients." +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 Minute " -"Blitz':" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 msgid "" -"The Flower server interacts with clients through an interface called :code:" -"`Client`. When the server selects a particular client for training, it sends " -"training instructions over the network. The client receives those " -"instructions and calls one of the :code:`Client` methods to run your code (i." 
-"e., to train the neural network we defined earlier)." +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which makes " -"it easier to implement the :code:`Client` interface when your workload uses " -"PyTorch. Implementing :code:`NumPyClient` usually means defining the " -"following methods (:code:`set_parameters` is optional though):" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 msgid "" -"update the local model weights with the parameters received from the server" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add one " -"line to actually run this client:" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -msgid "" -"That's it for the client. We only have to implement :code:`Client` or :code:" -"`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a " -"client of type :code:`NumPyClient` you'll need to first call its :code:" -"`to_client()` method. The string :code:`\"[::]:8080\"` tells the client " -"which server to connect to. In our case we can run the server and the client " -"on the same machine, therefore we use :code:`\"[::]:8080\"`. If we run a " -"truly federated workload with the server and clients running on different " -"machines, all that needs to change is the :code:`server_address` we point " -"the client at." +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 msgid "" -"Once the server is running we can start the clients in different terminals. " -"Open a new terminal and start the first client:" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. 
With that, we're ready to introduce a number of " +"new features." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 msgid "" -"Each client will have its own dataset. You should now see how the training " -"does in the very first terminal (the one that started the server):" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code `_ for this example can be found " -"in :code:`examples/quickstart-pytorch`." +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"PyTorch Lightning to train an Auto Encoder model on MNIST." +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 msgid "" -"Let's build a horizontal federated learning system using PyTorch Lightning " -"and Flower!" +"Last but not least, we specify the resources for each client and run the " +"simulation." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 msgid "" -"Please refer to the `full code example `_ to learn more." +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." 
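A compact sketch of what this server-side parameter initialization can look like in code, assuming ``Net`` and ``get_parameters`` from the introductory notebook and purely illustrative client fractions::

    from flwr.common import Context, ndarrays_to_parameters
    from flwr.server import ServerApp, ServerAppComponents, ServerConfig
    from flwr.server.strategy import FedAvg

    # Serialize the parameters of a freshly created model
    params = ndarrays_to_parameters(get_parameters(Net()))


    def server_fn(context: Context):
        strategy = FedAvg(
            fraction_fit=0.3,
            fraction_evaluate=0.3,
            initial_parameters=params,  # no client is asked for initial parameters
        )
        config = ServerConfig(num_rounds=3)
        return ServerAppComponents(strategy=strategy, config=config)


    server = ServerApp(server_fn=server_fn)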
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"scikit-learn to train a linear regression model." +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic Regression` " -"model on MNIST using Flower and scikit-learn." +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 msgid "" -"It is recommended to create a virtual environment and run everything within " -"this :doc:`virtualenv `." +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 msgid "" -"*Clients* are responsible for generating individual model parameter updates " -"for the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce an updated global model. " -"Finally, the *server* sends this improved version of the model back to each " -"*client*. A complete cycle of parameters updates is called a *round*." +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. 
But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "Flower 시뮬레이션." -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that we " -"need for our federated learning setup within :code:`utils.py`. The :code:" -"`utils.py` contains different functions defining all the machine learning " -"basics:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. 
In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 msgid "" -"Please check out :code:`utils.py` `here `_ for more details. The pre-" -"defined functions are used in the :code:`client.py` and imported. The :code:" -"`client.py` also requires to import several packages such as Flower and " -"scikit-learn:" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular image " -"classification dataset of handwritten digits for machine learning, and " -"partition the dataset for FL. This can be conveniently achieved using " -"`Flower Datasets `_. The :code:" -"`FederatedDataset.load_partition()` method loads the partitioned training " -"set for each partition ID defined in the :code:`--partition-id` argument." 
+"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, python-format msgid "" -"Next, the logistic regression model is defined and initialized with :code:" -"`utils.set_initial_params()`." +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 msgid "" -"The Flower server interacts with clients through an interface called :code:" -"`Client`. When the server selects a particular client for training, it sends " -"training instructions over the network. The client receives those " -"instructions and calls one of the :code:`Client` methods to run your code (i." -"e., to fit the logistic regression we defined earlier)." +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which makes " -"it easier to implement the :code:`Client` interface when your workload uses " -"scikit-learn. Implementing :code:`NumPyClient` usually means defining the " -"following methods (:code:`set_parameters` is optional though):" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "연합 학습이란 무엇입니까?" 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add one " -"line to actually run this client:" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." msgstr "" +"이 튜토리얼에서 연합 학습이 무엇인지 배우고 Flower로 첫 번째 시스템을 구축하고 점진적으로 확장해 나갈 것입니다. 본 " +"튜토리얼의 모든 부분을 완성할 수 있다면, 당신은 고급 연합 학습 시스템을 구축하여 그 분야의 현재 최고 기술 수준에 접근할 수 " +"있을 것입니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 msgid "" -"That's it for the client. We only have to implement :code:`Client` or :code:" -"`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a " -"client of type :code:`NumPyClient` you'll need to first call its :code:" -"`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells the client " -"which server to connect to. In our case we can run the server and the client " -"on the same machine, therefore we use :code:`\"0.0.0.0:8080\"`. If we run a " -"truly federated workload with the server and clients running on different " -"machines, all that needs to change is the :code:`server_address` we pass to " -"the client." +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." msgstr "" +"🧑‍🏫이 튜토리얼은 사전 지식을 많이 필요로 하지 않으며 연합 학습에 대해 상세히알 필요는 없습니다. 데이터 과학과 파이썬 " +"프로그래밍에 대한 기본적인 이해만 가정합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import again " -"all required libraries such as Flower and scikit-learn." +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." msgstr "" +"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" +" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " +"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and " -"the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower " -"Datasets here to load the test split of the MNIST dataset for server-side " -"evaluation." 
-msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "전통적인 머신러닝(기계학습)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 msgid "" -"The :code:`main` contains the server-side parameter initialization :code:" -"`utils.set_initial_params()` as well as the aggregation strategy :code:`fl." -"server.strategy:FedAvg()`. The strategy is the default one, federated " -"averaging (or FedAvg), with two clients and evaluation after each federated " -"learning round. The server can be started with the command :code:`fl.server." -"start_server(server_address=\"0.0.0.0:8080\", strategy=strategy, config=fl." -"server.ServerConfig(num_rounds=3))`." -msgstr "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "연합 학습에 대해 논의하기 전에 현재 대부분의 머신러닝이 어떻게 작동하는지 간략히 요약하겠습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server first:" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." msgstr "" +"머신러닝에서 우리는 모델과 데이터를 가지고 있습니다. 모델은 신경망(그림과 같이)일 수도 있고 고전적인 선형 회귀와 같은 다른 것일" +" 수도 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code `_ for this example can be found in :code:" -"`examples/sklearn-logreg-mnist`." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "모델과 데이터" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"TensorFlow to train a MobilNetV2 model on CIFAR-10." +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." msgstr "" +"우리는 유용한 작업을 수행하기 위해 데이터를 사용하여 모델을 훈련합니다. 작업은 이미지 속 물체를 감지하거나 음성 녹음을 기록하거나" +" 바둑과 같은 게임을 하는 것일 수 있습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|33cacb7d985c4906b348515c1a5cd993|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "데이터를 이용한 모델 훈련" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." 
+msgstr "실제로 우리가 사용하는 훈련 데이터는 모델을 훈련시키는 기계에서 비롯된 것이 아닙니다. 그 데이터는 다른 곳에서 만들어졌습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install TF " -"as well:" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." msgstr "" +"스마트폰에서 사용자와 앱의 상호 작용, 센서 데이터를 수집하는 자동차, 키보드를 통해 입력을 받는 노트북 또는 누군가 노래를 " +"부르리는 것을 듣는 스마트 스피커에서 비롯됩니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|cc080a555947492fa66131dc3a967603|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "핸드폰에 있는 데이터" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image " -"classification dataset for machine learning. The call to :code:`tf.keras." -"datasets.cifar10.load_data()` downloads CIFAR10, caches it locally, and then " -"returns the entire training and test set as NumPy ndarrays." +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." msgstr "" +"또한 중요한 것은 이 \"다른 곳\"이 보통 한 곳만 아니라 여러 곳이라는 것입니다. 같은 앱을 실행하는 여러 기기일 수도 " +"있습니다. 하지만 여러 조직이 모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:47 -msgid "" -"Next, we need a model. For the purpose of this tutorial, we use MobilNetV2 " -"with 10 output classes:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|085c3e0fb8664c6aa06246636524b20b|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "데이터가 여러 장치에 있습니다" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which makes " -"it easier to implement the :code:`Client` interface when your workload uses " -"Keras. The :code:`NumPyClient` interface defines three methods which can be " -"implemented in the following way:" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." msgstr "" +"따라서 머신러닝이나 어떤 종류의 데이터 분석을 이용하려면 과거에는 중앙 서버에서 모든 데이터를 수집하는 방법이 사용되었습니다. 이 " +"서버는 데이터 센터 어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." 
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|bfe69c74e48c45d49b50251c38c2a019|" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "중앙 데이터 수집" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 msgid "" -"You should now see how the training does in the very first terminal (the one " -"that started the server):" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." msgstr "" +"모든 데이터가 한 곳에 모이면, 우리는 궁극적으로 머신러닝 알고리즘을 사용하여 데이터에서 모델을 훈련시킬 수 있습니다. 이것이 바로" +" 우리가 기본적으로 사용해 온 머신러닝 방법입니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:169 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code `_ for this can be found in :" -"code:`examples/quickstart-tensorflow/client.py`." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "중앙 데이터 훈련" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "클래식 머신러닝의 어려움" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"XGBoost to train classification models on trees." +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." msgstr "" +"우리가 방금 본 전통적 머신러닝의 접근 방식은 경우에 따라 다르게 사용될 수 있습니다. 좋은 예로는 휴일 사진을 분류하거나 웹 " +"트래픽을 분석하는 것이 있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|163117eb654a4273babba413cf8065f5|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "집중화 가능" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that maximises " -"the computational boundaries for boosted tree methods. It's primarily " -"designed to enhance both the performance and computational speed of machine " -"learning models. In XGBoost, trees are constructed concurrently, unlike the " -"sequential approach taken by GBDT." +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." msgstr "" +"그러나 이 방법은 다른 많은 경우에 적용되지 않을 수 있습니다. 예를 들어, 중앙 집중식 서버에 데이터가 없거나 서버의 데이터가 " +"좋은 모델을 훈련하기에 충분하지 않을 수 있습니다." 
-#: ../../source/tutorial-quickstart-xgboost.rst:20 -msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning techniques." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "집중화 불가능" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems for " -"specialised applications, like survival analysis and financial fraud " -"detection." +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" msgstr "" +"전통적인 중앙 집중식 머신러닝 방법이 현실 세계에서 매우 중요한 수많은 사용 사례를 충족시킬 수 없는 이유가 있습니다. 이유는 " +"다음과 같은 여러 가지가 있습니다:" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 msgid "" -"Federated learning ensures that raw data remains on the local device, making " -"it an attractive approach for sensitive domains where data security and " -"privacy are paramount. Given the robustness and efficiency of XGBoost, " -"combining it with federated learning offers a promising solution for these " -"specific challenges." +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." msgstr "" +"**규정**: GDPR (유럽), CCPA (캘리포니아), PIPEDA (캐나다), LGPD (브라질), PDPL (아르헨티나), " +"KVKK (터키), POPI (남아프리카공화국), FSS (러시아), CDPR (중국), PDPB (인도), PIPA (한국), " +"APPI (일본), PDP (인도네시아), PDPA (싱가포르), APP (호주)등의 법규로 민감한 데이터가 이동하지 않도록 " +"보호하고 있습니다. 실제로 이러한 규정은 사용자가 세계의 다른 지역에 살고 데이터가 다른 데이터 보호 규정에 의해 통제되기 때문에 " +"단일 조직이 자체 사용자 데이터를 인공 지능 학습에 사용하는 것을 방지하기도 합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart `_) with two *clients* and one *server* to " -"demonstrate how federated XGBoost works, and then we dive into a more " -"complex example (`full code xgboost-comprehensive `_) to run various " -"experiments." +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. 
If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." msgstr "" +"**사용자 선호도**: 규정 외에도 일부 사용 사례에서 사용자는 데이터가 자기 장치를 떠나지 않기를 예상합니다. 휴대폰의 디지털 " +"키보드에 비밀번호와 신용카드 정보를 입력하면 비밀번호가 해당 키보드를 개발한 회사의 서버에 뜨길 원하지는 않겠죠? 사실, 이 사용 " +"사례가 애당초 연합 학습이 발명된 이유였습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." msgstr "" +"**데이터 볼륨**: 일부 센서(예:카메라)는 너무 많은 데이터 볼륨을 생성하여 모든 데이터를 수집하는 것이 실현 가능하지도 않고 " +"경제적이지도 않습니다(예: 대역폭 또는 통신 효율로 인해). 전국에 수백 개 기차역이 있는 국가 철도 서비스를 생각해 보세요. 각 " +"기차역에 수 많은 보안 카메라가 설치되어 있다면, 그들이 생산하는 대량의 미가공 된 온디바이스 데이터는 처리 및 저장을 위해 " +"엄청나게 강력하고 매우 비싼기반 구조를 필요로 합니다. 그런데 대부분의 데이터는 유용하지도 않습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "중앙 집중식 머신러닝이 작동하지 않는 예는 다음과 같습니다:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" -msgstr "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "여러 병원의 민감한 의료기록으로 암 검진 모델 훈련" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "금융 사기를 탐지하기 위한 다양한 조직의 금융 정보" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "더 나은 범위 예측을 위해 전기 자동차의 위치 데이터" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "더 나은 자동 완성 모델을 훈련시키기 위한 엔드 투 엔드 암호화 된 메시지" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 msgid "" -"*Clients* are responsible for generating individual weight-updates for the " -"model based on their local datasets. Now that we have all our dependencies " -"installed, let's run a simple distributed training with two clients and one " -"server." +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. 
But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." msgstr "" +"`Brave `__ 브라우저나 `Signal `__ " +"메신저와 같은 개인 정보 보호 시스템의 인기는 사용자들이 개인 정보 보호에 신경 쓴다는 것을 보여줍니다. 실제로 그러한 대안이 " +"존재하는 경우 다른 대안보다 개인 정보 보호 강화 버전을 선택합니다. 그런데 이러한 사례에 머신러닝 및 데이터 과학을 적용하여 " +"프라이버시 데이터를 활용하려면 어떻게 해야 합니까? 이 모든 분야는 최근 AI의 발전으로 상당한 이익을 얻을 수 있는 분야입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "연합 학습" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets " -"and other related functions:" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" msgstr "" +"연합 학습은 이 방법을 쉽게 뒤집었습니다. 데이터를 컴퓨팅 센터로 옮기는 대신 컴퓨팅 능력을 데이터가 생성되는 장소로 이동 " +"시킴으로써 분산된 데이터에서 머신러닝을 실현합니다. 요약하자면:" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "중앙 집중식 머신러닝: 데이터를 컴퓨팅 센터로 이동" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "연합(기계)학습: 컴퓨팅을 데이터로 옮김" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower " -"Datasets and conduct data partitioning for FL:" -msgstr "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" +"이를 통해 이전에는 불가능했던 분야에서 머신러닝(및 기타 데이터 과학 방법)을 사용할 수 있습니다. 이제 다양한 병원이 협력할 수 " +"있도록 함으로써 우수한 의료 AI 모델을 훈련할 수 있습니다. 다양한 금융 기관의 데이터에 대한 AI 모델을 훈련하여 금융 사기를 " +"해결할 수 있습니다. 개인 정보 보호를 강화하지 않는 대안보다 더 나은 AI가 내장된 새로운 개인 정보 보호 강화 애플리케이션(예:" +" 보안 메시징)을 구축할 수 있습니다. 그것들은 떠오르는 몇 가지 예에 불과합니다. 연합 학습을 구축함에 따라 이전에 액세스할 수 " +"없었던 많은 데이터에 액세스할 수 있게 되었기 때문에 갑자기 재생될 수 있는 영역이 점점 더 많아지고 있습니다." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "그렇다면 연합 학습은 어떻게 작동합니까? 직관적인 설명부터 시작하겠습니다." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "연합 학습의 5단계" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "0단계: 글로벌 모델 초기화" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load the " -"partition for the given client based on :code:`node_id`:" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." msgstr "" +"서버에서 모델을 초기화하는 것으로 시작합니다. 이것은 전통적인 중앙 집중식 학습과도 동일합니다: 임의로 또는 이전에 저장된 " +"체크포인트에서 모델 매개변수를 초기화합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:121 -msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|f403fcd69e4e44409627e748b404c086|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 -msgid "" -"The functions of :code:`train_test_split` and :code:" -"`transform_dataset_to_dmatrix` are defined as below:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "글로벌 모델 초기화" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "1단계: 연결된 여러 조직/장치(클라이언트 노드)에 모델 전송" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 msgid "" -"The :code:`num_local_round` represents the number of iterations for local " -"tree boost. We use CPU for the training in default. One can shift it to GPU " -"by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as evaluation " -"metric." +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." msgstr "" +"다음으로 글로벌 모델의 파라미터를 연결된 클라이언트 노드(예: 스마트폰과 같은 에지 디바이스 또는 조직에 속한 서버)로 보냅니다. " +"이것은 각 참여 노드가 동일한 모델 매개변수를 사용하여 로컬 훈련을 시작하도록 하기 위함입니다. 일반적으로 모든 노드가 아닌 몇 " +"개의 연결 노드만 사용합니다. 그 이유는 점점 더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|4b00fe63870145968f8443619a792a42|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 -msgid "" -"After loading the dataset we define the Flower client. We follow the general " -"rule to define :code:`XgbClient` class inherited from :code:`fl.client." -"Client`." 
-msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "글로벌 모델 전송" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures " -"for training." -msgstr "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "2단계: 각 조직/장치(클라이언트 노드)의 데이터에 대해 로컬로 모델 훈련" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` " -"methods insides :code:`XgbClient` class as follows." +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." msgstr "" +"이제 모든(선택된) 클라이언트 노드에는 최신 버전의 글로벌 모델 파라미터가 있으며 로컬 훈련을 시작합니다. 그들은 자신의 로컬 " +"데이터 세트를 사용하여 자신의 로컬 모델을 훈련합니다. 모델이 완전히 수렴할 때까지 훈련하지 않고 잠시만 훈련합니다. 이는 로컬 " +"데이터에서 한 단계 정도로 짧거나 몇 단계(mini-batches)에 불과할 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:210 -msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use :code:`get_parameters` " -"and :code:`set_parameters` to initialise model parameters for XGBoost. As a " -"result, let's return an empty tensor in :code:`get_parameters` when it is " -"called by the server at the first round." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|368378731066486fa4397e89bc6b870c|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:251 -msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build up " -"the first set of trees. the returned Booster object and config are stored " -"in :code:`self.bst` and :code:`self.config`, respectively. From the second " -"round, we load the global model sent from server to :code:`self.bst`, and " -"then update model weights on local training data with function :code:" -"`local_boost` as follows:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "로컬 데이터에 대한 훈련" -#: ../../source/tutorial-quickstart-xgboost.rst:269 -msgid "" -"Given :code:`num_local_round`, we update trees by calling :code:`self.bst." -"update` method. After training, the last :code:`N=num_local_round` trees " -"will be extracted to send to the server." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "3단계: 모델 파라미터를 업데이트하여 서버로 되돌리기" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to conduct " -"evaluation on valid set. The AUC value will be returned." +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. 
The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." msgstr "" +"로컬 훈련 후에는 클라이언트 노드마다 원래 받은 모델 파라미터의 버전이 조금씩 다릅니다. 파라미터가 다른 이유는 각 클라이언트 " +"노드의 로컬 데이터 세트에 다른 데이터가 있기 때문입니다. 그런 다음 클라이언트 노드는 이러한 모델 업데이트를 서버로 다시 " +"보냅니다. 보내는 모델 업데이트는 전체 모델 파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:294 -msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one " -"line to actually run this client:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a66aa83d85bf4ffba7ed660b718066da|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 -msgid "" -"That's it for the client. We only have to implement :code:`Client`and call :" -"code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` tells the " -"client which server to connect to. In our case we can run the server and the " -"client on the same machine, therefore we use :code:`\"[::]:8080\"`. If we " -"run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the :code:`server_address` " -"we point the client at." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "모델 업데이트 전송" -#: ../../source/tutorial-quickstart-xgboost.rst:311 -msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version of " -"the model back to each *client* to finish a complete FL round." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "4단계: 모델 업데이트를 새 글로벌 모델로 집계" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from :" -"code:`flwr.server.strategy`." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" msgstr "" +"서버는 선택된 클라이언트 노드들로부터 모델 업데이트들을 수신합니다. 서버가 100개의 클라이언트 노드를 선택했다면 이제 각각 " +"클라이언트의 로컬 데이터를 기반으로 훈련된 100개의 조금씩 다른 원래 글로벌 모델 버전을 갖게 됩니다. 하지만 우리는 100개의 " +"모든 클라이언트 노드의 데이터에서 학습한 내용을 포함하는 모델을 하나만 갖고 싶지 않았습니까?" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 msgid "" -"We use two clients for this example. An :code:`evaluate_metrics_aggregation` " -"function is defined to collect and wighted average the AUC values from " -"clients." -msgstr "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. 
The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" +"단일 모델 하나를 얻으려면 클라이언트 노드에서 받은 모든 모델 업데이트를 결합해야 합니다. 이 과정이 *집합*라고 하며 여러 가지 " +"방법이 있습니다. 가장 기본적인 방법은*Federated Averaging* (`McMahan et al., 2016 " +"`__)이라고 하고 보통 줄여서 *FedAvg*로 표기합니다. " +"*FedAvg* 는 100개의 모델 업데이트를 받아 이름에서 알 수 있듯이 모델 업데이트를 평균화합니다. 더 정확히 말하면, 모델 " +"업데이트의 *가중 평균* 을 각 클라이언트가 훈련에 사용한 예제 수에 따라 가중치를 부여합니다. 가중치는 각 데이터 예제가 결과 " +"글로벌 모델에 동일한 \"영향\" 을 미치는지 확인하는 데 중요합니다. 한 클라이언트에 10개의 데이터 포인트가 있고 다른 " +"클라이언트에 100개의 데이터 포인트가 있다면 가중치를 부여하지 않고 10개의 예가 100개의 사례보다 글로벌 모델에 10배 더 " +"많은 영향을 미칩니다." -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|82324b9af72a4582a81839d55caab767|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "모델 업데이트 집계" -#: ../../source/tutorial-quickstart-xgboost.rst:356 -msgid "" -"You must be curious about how bagging aggregation works. Let's look into the " -"details." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "5단계: 모델이 수렴할 때까지 1~4단계를 반복합니다" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define :code:" -"`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`. Then, we " -"override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :code:" -"`evaluate` methods as follows:" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." msgstr "" +"단계 1에서 4는 우리가 말하는 단일 라운드 연합 학습입니다. 글로벌 모델 파라미터는 참여하는 클라이언트 노드에 전송되고(1단계)," +" 클라이언트 노드는 로컬 데이터에 대한 훈련을 받고(2단계), 업데이트된 모델을 서버에 전송하고(3단계), 서버는 모델 업데이트를 " +"집계하여 글로벌 모델의 새로운 버전을 얻습니다(4단계)." -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. 
" +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." msgstr "" +"한 라운드의 반복에서 해당 반복에 참여하는 각 클라이언트 노드는 짧은 시간 동안만 훈련합니다. 집계 단계(4단계) 이후 우리 모델이" +" 관련된 모든 클라이언트 노드의 모든 데이터에 대해 잠시 동안만 훈련되었음을 의미합니다. 그런 다음 모든 클라이언트 노드의 " +"데이터에서 잘 작동하는 완전히 훈련된 모델에 도달하려면 이 훈련 과정을 계속 반복해야 합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling :code:" -"`_get_tree_nums`. Then, the fetched information will be aggregated. After " -"that, the trees (containing model weights) are aggregated to generate a new " -"tree model." -msgstr "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"축하합니다, 이제 연합 학습의 기초에 대해 알게 되었습니다. 물론 아직 논의해야 할 내용이 많지만 이는 연합 학습의 축소판일 " +"뿐입니다. 본 튜토리얼의 후반부에는 좀 더 자세히 설명하겠습니다. 흥미로운 질문은 다음과 같습니다: 다음 라운드에 참여해야 할 가장" +" 좋은 클라이언트 노드를 어떻게 선택할 수 있을까요? 모델 업데이트를 집계하는 가장 좋은 방법은 무엇일까요? 실패한 클라이언트 " +"노드(낙오자)를 어떻게 처리할 수 있을까요?" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." msgstr "" +"다양한 클라이언트 노드의 분산된 데이터에 대해 모델을 훈련할 수 있는 것처럼 해당 데이터에 대한 모델을 평가하여 가치 있는 " +"메트릭(metrics)을 받을 수도 있습니다. 이를 연합 평가라고 하며 FE라고 약칭하기도 합니다. 사실 연합 평가는 대부분의 연합" +" 학습 시스템에서 필수적인 부분입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "연합 분석" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in :code:" -"`metrics_distributed`. One can see that the average AUC increases over FL " -"rounds." +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." msgstr "" +"많은 경우 머신러닝은 데이터로부터 가치를 얻기 위한 필수 조건이 아닙니다. 
데이터 분석을 통해 귀중한 통찰력을 얻을 수 있지만, " +"명확한 답변을 얻기에는 데이터가 충분하지 않은 경우가 많습니다. 특정 유형의 건강 상태가 발생하는 평균 연령은 몇 살입니까? 연합 " +"분석을 사용하면 여러 클라이언트 노드에서 이러한 쿼리(query)를 실행할 수 있습니다. 서버가 단일 클라이언트 노드에서 제출한 " +"결과를 보지 못하도록 보안을 강화한 집합 방식과 같은 다른 프라이버시 향상 기술과 함께 자주 사용됩니다." -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 msgid "" -"The full `source code `_ for this example can be found in :code:`examples/" -"xgboost-quickstart`." -msgstr "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" +"차분 프라이버시(Differential Privacy)는 연합 학습의 맥락에서 종종 언급됩니다. 통계 데이터를 분석하고 공유할 때 " +"사용하는 프라이버시 보호 방식으로, 참가자 개인의 프라이버시를 보장합니다. 차분 프라이버시는 모델 업데이트에 통계적 " +"잡음(noise)를 추가하여 개별 참가자의 정보를 구별하거나 재식별할 수 없도록 함으로써 이를 달성합니다. 이 기술은 정량적 개인 " +"정보 보호 조치를 제공하는 최적화라고 볼 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time to " -"run some more comprehensive experiments by customising the experimental " -"settings. In the xgboost-comprehensive example (`full code `_), we provide " -"more options to define various experimental setups, including aggregation " -"strategies, data partitioning and centralised/distributed evaluation. We " -"also support :doc:`Flower simulation ` making it " -"easy to simulate large client cohorts in a resource-aware manner. Let's take " -"a look!" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." msgstr "" +"연합 학습, 연합 평가 및 연합 분석은 머신러닝 모델을 앞뒤로 이동하고 로컬 데이터에 대해 훈련 및 평가한 다음 업데이트된 모델을 " +"통합하기 위한 기본 프레임워크가 필요합니다. Flower가 제공하는 기반 구조는 간단하고 확장 가능하며 안전한 방식으로 이러한 " +"목표를 달성합니다. 간단히 말해서, Flower는 연합 학습, 분석 및 평가를 위한 통합 접근 방식을 제공합니다. 이를 통해 " +"사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 수 있습니다." -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, which " -"performs FL in a client-by-client fashion. 
Instead of aggregating multiple " -"clients, there is only one single client participating in the training per " -"round in the cyclic training scenario. The trained local XGBoost trees will " -"be passed to the next client as an initialised model for next round's " -"boosting." -msgstr "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "Flower 연합 학습 서버 및 클라이언트 노드(자동차, 스쿠터, 개인용 컴퓨터, 룸바, 전화)" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" -"To do this, we first customise a :code:`ClientManager` in :code:" -"`server_utils.py`:" -msgstr "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "축하합니다, 지금까지 당신은 연합 학습의 기본 지식과 그것이 어떻게 전통적 (중앙 집중식) 머신러닝과 관련되는지 배웠습니다!" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 msgid "" -"The customised :code:`ClientManager` samples all available clients in each " -"FL round based on the order of connection to the server. Then, we define a " -"new strategy :code:`FedXgbCyclic` in :code:`flwr.server.strategy." -"fedxgb_cyclic.py`, in order to sequentially select only one client in given " -"round and pass the received model to next client." -msgstr "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "이 튜토리얼의 다음 부분에서는 Flower와 함께 첫 번째 연합 학습 시스템을 구축할 것입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model by " -"overriding :code:`aggregate_fit`." +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate` " -"methods ensure the clients to be sequentially selected given FL round:" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." msgstr "" +"`Flower 연합 학습 튜토리얼- 1부 `__ PyTorch와 Flower를 사용하여 간단한 연합 학습 시스템을" +" 구축하는 방법을 보여줍니다." -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" -msgstr "" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "현재, Flower는 \"base\" 이미지 그리고 " +#~ "\"superlink\" 이미지를 제공합니다. base 이미지는 이름에서" +#~ " 알 수 있듯이 SuperLink가 필요로 하는 기본" +#~ " dependencies를 포함하고 있습니다. 여기에는 시스템 " +#~ "dependencies, Python 및 Python 도구가 포함됩니다." +#~ " SuperLink 이미지는 base 이미지를 기반으로 하지만" +#~ " \"pip\"을 사용하여 SuperLink를 추가로 설치합니다." 
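The *FedAvg* passage above reduces to an example-weighted mean of the client updates. Below is a minimal, self-contained NumPy sketch of just that aggregation step; the function name fed_avg and the toy 10-vs-100-example clients are illustrative assumptions for this tutorial text, not Flower's actual strategy implementation.

# Minimal sketch of Federated Averaging (FedAvg) aggregation, assuming each
# client i reports (num_examples_i, parameters_i), where parameters_i is a
# list of NumPy arrays. The name `fed_avg` is illustrative, not a Flower API.
from typing import List, Tuple
import numpy as np

def fed_avg(results: List[Tuple[int, List[np.ndarray]]]) -> List[np.ndarray]:
    """Weighted-average client updates by the number of local training examples."""
    total_examples = sum(num_examples for num_examples, _ in results)
    num_layers = len(results[0][1])
    aggregated = []
    for layer in range(num_layers):
        # Each layer of the new global model is the example-weighted mean
        # of the corresponding layer across all client updates.
        weighted_sum = sum(
            num_examples * params[layer] for num_examples, params in results
        )
        aggregated.append(weighted_sum / total_examples)
    return aggregated

# Toy example mirroring the text: one client with 10 examples, one with 100.
client_a = (10, [np.full((2, 2), 1.0)])
client_b = (100, [np.full((2, 2), 2.0)])
global_params = fed_avg([client_a, client_b])
print(global_params[0])  # ~1.91 everywhere: the 100-example client dominates

As the printed result shows, the aggregate lands close to the 100-example client's parameters, which is exactly the weighting behaviour the tutorial text describes for keeping each data example's "influence" equal.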
-#: ../../source/tutorial-quickstart-xgboost.rst:759 -msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner` to " -"instantiate the data partitioner based on the given :code:`num_partitions` " -"and :code:`partitioner_type`. Currently, we provide four supported " -"partitioner type to simulate the uniformity/non-uniformity in data quantity " -"(uniform, linear, square, exponential)." -msgstr "" +#~ msgid "``3.11``" +#~ msgstr "``3.11``" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" -msgstr "" +#~ msgid "Defaults to ``22.04``." +#~ msgstr "``22.04``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:792 -msgid "" -"To facilitate centralised evaluation, we define a function in :code:" -"`server_utils.py`:" -msgstr "" +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "``flwr/base``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:824 -msgid "" -"This function returns a evaluation function which instantiates a :code:" -"`Booster` object and loads the global model weights to it. The evaluation is " -"conducted by calling :code:`eval_set()` method, and the tested AUC value is " -"reported." -msgstr "" +#~ msgid "The Python version of the base image." +#~ msgstr "base 이미지의 Python 버전." -#: ../../source/tutorial-quickstart-xgboost.rst:827 -msgid "" -"As for distributed evaluation on the clients, it's same as the quick-start " -"example by overriding the :code:`evaluate()` method insides the :code:" -"`XgbClient` class in :code:`client_utils.py`." -msgstr "" +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "``py3.11``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" -msgstr "" +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "``ubuntu22.04``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:832 -msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a " -"single machine or a cluster of machines." -msgstr "" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "``flwr``이 기본값." -#: ../../source/tutorial-quickstart-xgboost.rst:866 -msgid "" -"After importing all required packages, we define a :code:`main()` function " -"to perform the simulation process:" -msgstr "" +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" +#~ "이미지의 이름은 ``flwr_superlink``이고 태그는 " +#~ "``0.1.0``입니다. 필요에 따라 빌드 argument들 뿐만 " +#~ "아니라 이름과 태그도 정할 수 있습니다. 이 값들은" +#~ " 예시일 뿐입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:921 -msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, the " -"clients won't need to pre-process their partitions again." -msgstr "" +#~ msgid "Edge Client Engine" +#~ msgstr "엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:975 -msgid "" -"After that, we start the simulation by calling :code:`fl.simulation." 
-"start_simulation`:" -msgstr "" +#~ msgid "Virtual Client Engine" +#~ msgstr "가상 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:995 -msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" -msgstr "" +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "동일 작업에서 가상 클라이언트 엔진과 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server and " -"simulation, allowing users to specify different experimental settings. Let's " -"first see the sever side:" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 및 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 -msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and " -"evaluation fashion. Note that with :code:`--centralised-eval`, the sever " -"will do centralised evaluation and all functionalities for client evaluation " -"will be disabled." -msgstr "" +#~ msgid "Clone the flower repository." +#~ msgstr "Flower 레포지토리를 복제합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" -msgstr "" +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" +#~ ":doc:Run Flower using Docker 의 첫 번째 섹션을" +#~ " 따라 주십시오. 해당 부분을 더 자세히 설명해 " +#~ "줍니다." -#: ../../source/tutorial-quickstart-xgboost.rst:1144 -msgid "" -"This defines various options for client data partitioning. Besides, clients " -"also have an option to conduct evaluation on centralised test set by " -"setting :code:`--centralised-eval`, as well as an option to perform scaled " -"learning rate based on the number of clients by setting :code:`--scaled-lr`." -msgstr "" +#~ msgid "``22.04``" +#~ msgstr "``22.04``" -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" -msgstr "" +#~ msgid "``23.0.1``" +#~ msgstr "``23.0.1``" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." 
-msgstr "" +#~ msgid "``69.0.2``" +#~ msgstr "``69.0.2``" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" -msgstr "" +#~ msgid "``1.8.0``" +#~ msgstr "``1.8.0``" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 -msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 clients " -"with exponential distribution for 50 rounds, we first start the server as " -"below:" -msgstr "" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "SuperLink/SuperNode 또는 ServerApp 이미지 빌드" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" -msgstr "" +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "``1.8.0-py3.10-ubuntu22.04``" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" -msgstr "" +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "다음 예시에서는 공식 Flower 기본 이미지로 SuperLink/SuperNode 또는 ServerApp이미지를 만듭니다:" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -msgid "" -"The full `code `_ for this comprehensive example can be found in :code:" -"`examples/xgboost-comprehensive`." -msgstr "" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Docker 이미지 빌드를 위해 CI를 트리거합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" -msgstr "" +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." +#~ msgstr "" +#~ "워크플로우를 트리거하려면 공동 작업자가 GitHub CI에서 " +#~ "``workflow_dispatch``를 생성해야 합니다. 이 작업은 " +#~ "UI 또는 GitHub CLI 를 통해 수행할 수" +#~ " 있습니다. 이벤트는 Flower 버전 한 가지 입력만" +#~ " 필요합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies can be " -"used to customize the execution on both the server and the clients (`part 2 " -"`__)." -msgstr "" +#~ msgid "**Via the UI**" +#~ msgstr "**UI를 통해서**" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll continue to customize the federated learning system " -"we built previously by creating a custom version of FedAvg (again, using " -"`Flower `__ and `PyTorch `__)." -msgstr "" +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" +#~ "``Build docker images`` 워크플로우 `페이지 " +#~ "`_로 이동합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join the " -"Flower community on Slack to connect, ask questions, and get help: `Join " -"Slack `__ 🌼 We'd love to hear from you in the " -"``#introductions`` channel! And if anything is unclear, head over to the " -"``#questions`` channel." 
-msgstr "" +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "``Run workflow`` 버튼을 누르고 ``Version of Flower``에 Flower의 새버전을 입력합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "" +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "**초록색**의 ``Run workflow``버튼을 클릭합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "" +#~ msgid "**Via the GitHub CI**" +#~ msgstr "**GitHub CI를 통해서**" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 -msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." -msgstr "" +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "``gh auth login``을 통해 로그인 했는지, 현재 작업 디렉토리가 Flower 리포지토리의 root인지 확인하세요." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "" +#~ msgid "" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" +#~ "``gh workflow run docker-images.yml -f" +#~ " flwr-version=``을 통해 워크플로우 를" +#~ " 트리거합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "" +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "예시: JAX - JAX Federated 실행" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 -msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" -msgstr "" +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" +#~ "Flower를 시작하는 가장 간단한 방법은 `Docker " +#~ "Hub `__에서 찾을 수 " +#~ "있는 미리 만들어진 Docker 이미지를 사용하는 것입니다." +#~ " 지원되는 아키텍처는 ``amd64`` 및 ``arm64v8``입니다." + +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" +#~ "전이 표시되지 않고 대신 명령을 찾을 수 없다는" +#~ " 오류가 표시되는 경우 먼저 Docker를 설치해야 " +#~ "합니다. `여기 `_에서" +#~ " 설치 지침을 찾을 수 있습니다." 
+ +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" +#~ "Linux에서 Docker 명령을 실행하려면 ``sudo`` 권한이" +#~ " 필요합니다. ``sudo`` 를 사용하지 않으려면 공식 " +#~ "Docker 웹사이트의 `Post-installation steps " +#~ "`_를" +#~ " 따르세요." + +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" +#~ "최적의 성능과 호환성을 보장하려면 SuperLink, SuperNode" +#~ " 및 ServerApp 이미지를 함께 실행할 때 버전이" +#~ " 동일해야 합니다. 이렇게 하면 원활한 통합을 보장하고" +#~ " 서로 다른 버전을 사용할 때 발생할 수 있는" +#~ " 잠재적인 충돌이나 문제를 방지할 수 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled (on " -"Google Colab: ``Runtime > Change runtime type > Hardware acclerator: GPU > " -"Save``). Note, however, that Google Colab is not always able to offer GPU " -"acceleration. If you see an error related to GPU availability in one of the " -"following sections, consider switching back to CPU-based execution by " -"setting ``DEVICE = torch.device(\"cpu\")``. If the runtime has GPU " -"acceleration enabled, you should see the output ``Training on cuda``, " -"otherwise it'll say ``Training on cpu``." -msgstr "" +#~ msgid "Flower SuperLink" +#~ msgstr "Flower SuperLink" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "" +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "Flower를 사용해보고 싶다면 다음 명령을 사용하면 됩니다:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into ten " -"smaller datasets (each split into training and validation set), and wrap " -"everything in their own ``DataLoader``. We introduce a new parameter " -"``num_clients`` which allows us to call ``load_datasets`` with different " -"numbers of clients." -msgstr "" +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" +#~ "이 명령은 Docker Hub에서 ``1.8.0`` 태그가 " +#~ "있는 Docker 이미지를 가져옵니다. 이 태그는 Flower" +#~ " 버전을 지정합니다. 이 경우, Flower 1.8.0입니다." +#~ " '`--rm`` 플래그는 컨테이너가 종료된 후 컨테이너를 " +#~ "제거하도록 Docker에 지시합니다." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "" +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" +#~ "``-p :`` 플래그는 호스트의 포트 " +#~ "``9091``/``9092``를 컨테이너의 ``9091``/``9092``에 매핑하여 " +#~ "``http://localhost:9091``의 드라이버 API와 " +#~ "``http://localhost:9092``의 Fleet API에 액세스할 수" +#~ " 있도록 Docker에 지시합니다. 마지막으로, 태그 뒤에 " +#~ "오는 모든 플래그는 Flower SuperLink에 전달됩니다. " +#~ "여기서는 ``--insecure``플래그를 전달합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 -msgid "" -"Let's continue with the usual model definition (including ``set_parameters`` " -"and ``get_parameters``), training and test functions:" -msgstr "" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." +#~ msgstr "" +#~ "``--insecure`` 플래그는 안전하지 않은 통신(HTTPS가 아닌" +#~ " HTTP 사용)을 활성화하며 테스트 목적으로만 사용해야 " +#~ "합니다. 프로덕션 환경에 배포할 때는 `SSL " +#~ "`__을 활성화할 것을 강력히 권장합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "'`--help``을 사용하면 SuperLink가 지원하는 모든 플래그를 볼 수 있습니다:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 -msgid "" -"To implement the Flower client, we (again) create a subclass of ``flwr." -"client.NumPyClient`` and implement the three methods ``get_parameters``, " -"``fit``, and ``evaluate``. Here, we also pass the ``cid`` to the client and " -"use it log additional details:" -msgstr "" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "호스트 시스템에 상태를 저장할 볼륨 마운트하기" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "" +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. 
If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" +#~ "호스트 시스템에서 SuperLink의 상태를 유지하려면 호스트 " +#~ "시스템에서 파일을 저장할 디렉터리와 데이터베이스 파일의 이름을" +#~ " 지정하기만 하면 됩니다. 기본적으로 SuperLink 컨테이너는" +#~ " 사용자 ID가 ``49999``인 ``app``이라는 루트가 아닌" +#~ " 사용자로 실행됩니다. 마운트된 디렉터리에 적절한 권한이 " +#~ "있는지 확인하려면 새 디렉터리를 생성하고 디렉터리의 사용자" +#~ " ID를 ``49999``로 변경하는 것이 좋습니다. 나중에 " +#~ "디렉터리를 삭제하려면 ``sudo chown -R $USER:$(id" +#~ " -gn) state``를 실행하여 사용자 ID를 현재 " +#~ "사용자 ID로 다시 변경할 수 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" +#~ "필요한 모든 파일이 로컬``certificates`` 디렉터리에 있다고" +#~ " 가정하면, ``--volume``플래그를 사용하여 로컬 디렉터리를 " +#~ "컨테이너의 ``/app/certificates/`` 디렉터리에 마운트할 수 " +#~ "있습니다. 이렇게 하면 SuperLink 가 컨테이너 내의" +#~ " 파일에 액세스할 수 있습니다. ``ro``는 ``read-" +#~ "only``을 의미합니다. Docker 볼륨은 기본적으로 " +#~ "``read-write``로 설정되어 있는데, 이 옵션을 사용하면" +#~ " 볼륨을 ``read-only``으로 만들 수 있습니다. " +#~ "마지막으로 인증서 및 키 파일의 이름을 ``--ssl-" +#~ "ca-certfile``, ``--ssl-certfile`` 및 " +#~ "``--ssl-keyfile`` 플래그와 함께 SuperLink에 전달합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 -msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher " -"learning rate (potentially also other hyperparameters) to the optimizer of a " -"fraction of the clients. We will keep the sampling of the clients as it is " -"in ``FedAvg`` and then change the configuration dictionary (one of the " -"``FitIns`` attributes)." -msgstr "" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" +#~ "SuperNode Docker 이미지는 Flower의 사전 설치된 " +#~ "버전과 함께 제공되며, 자체 SuperNode 이미지를 " +#~ "구축하기 위한 기반 역할을 합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 -msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" -msgstr "" +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" +#~ "Flower 레포지토리에서 찾을 수 있는 ``quickstart-" +#~ "pytorch`` 예제를 사용하여 ClientApp을 도커라이즈하는 " +#~ "방법을 설명하겠습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "" +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. 
You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" +#~ "시작하기 전에 로컬 개발 환경에서 몇 가지 전제" +#~ " 조건을 충족해야 합니다. 'quickstart-pytorch' " +#~ "예제 대신 ClientApp을 실행하려는 경우 첫 번째 " +#~ "부분을 건너뛸 수 있습니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 -msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom " -"strategy enables granular control over client node configuration, result " -"aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``. To " -"make custom strategies even more powerful, you can pass custom functions to " -"the constructor of your new class (``__init__``) and then call these " -"functions whenever needed." -msgstr "" +#~ msgid "Let's assume the following project layout:" +#~ msgstr "다음과 같은 프로젝트 레이아웃을 가정해 보겠습니다:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: `Join " -"Slack `__" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" +#~ "먼저 ``ClientApp`` 코드가 있는 디렉토리에 " +#~ "``requirements.txt`` 파일을 만들어야 합니다. 이 " +#~ "파일에는 클라이언트 앱에 필요한 모든 의존성을 나열합니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 -msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd also " -"love to hear who you are in ``#introductions``!" -msgstr "" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" +#~ "`flwr `__ 는 이미 " +#~ "``flwr/supernode`` 기본 이미지에 설치되어 있으므로, " +#~ "``torch``, ``tensorflow`` 등과 같은 다른 패키지" +#~ " dependencies만 ``requirements.txt``에 포함시키면 됩니다." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 -msgid "" -"The `Flower Federated Learning Tutorial - Part 4 `__ introduces " -"``Client``, the flexible API underlying ``NumPyClient``." -msgstr "" +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "다음으로, Dockerfile을 생성합니다.``quickstart-pytorch`` " +#~ "예제를 사용하는 경우 ``examples/quickstart-pytorch``에" +#~ " ``Dockerfile.supernode``라는 새 파일을 생성합니다." 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "``Dockerfile.supernode``에는 SuperNode 이미지를 조립하는 지침이 포함되어 있습니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In the " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__), we learned how strategies can be used " -"to customize the execution on both the server and the clients (`part 2 " -"`__), and we built our own custom strategy from scratch (`part " -"3 `__)." -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" +#~ "처음 두 줄에서는 ``nightly`` 태그가 붙은 " +#~ "SuperNode 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 다음으로," +#~ " ``requirements.txt`` 파일을 이미지에 복사하여 " +#~ "ClientApp dependencies 요소를 설치하고 ``pip " +#~ "install``을 실행합니다. 마지막 두 줄에서 " +#~ "``client.py`` 모듈을 이미지에 복사하고 ``client:app`` " +#~ "인수를 사용하여 진입점을 ``flower-client-app``로 " +#~ "설정합니다. 인수는 클라이언트앱 내부에서 실행될 클라이언트앱의 " +#~ "객체 참조 (``:``) 입니다." + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 빌드" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 -msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new baseclass " -"for building clients, simply named ``Client``. In previous parts of this " -"tutorial, we've based our client on ``NumPyClient``, a convenience class " -"which makes it easy to work with machine learning libraries that have good " -"NumPy interoperability. With ``Client``, we gain a lot of flexibility that " -"we didn't have before, but we'll also have to do a few things the we didn't " -"have to do before." -msgstr "" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" +#~ "이미지에 ``flwr_supernode``라는 이름을 붙이고 ``0.0.1``" +#~ " 태그를 붙였습니다. 여기서 선택한 값은 예시일 뿐이라는" +#~ " 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 -msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" -msgstr "" +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 실행" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." 
+#~ msgstr "이제 SuperNode 이미지를 빌드했으니 이제 실행할 수 있습니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into ten " -"smaller datasets (each split into training and validation set), and wrap " -"everything in their own ``DataLoader``." -msgstr "" +#~ msgid "Let's break down each part of this command:" +#~ msgstr "이 명령의 각 부분을 자세히 살펴보겠습니다:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "" +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "``--rm``: 이 옵션은 컨테이너가 중지될 때 자동으로 제거되도록 지정합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 -msgid "" -"So far, we've implemented our client by subclassing ``flwr.client." -"NumPyClient``. The three methods we implemented are ``get_parameters``, " -"``fit``, and ``evaluate``. Finally, we wrap the creation of instances of " -"this class in a function called ``client_fn``:" -msgstr "" +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "``--insecure``: 이 옵션은 보안되지 않은 통신을 활성화합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 -msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" -msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--superlink 192.168.1.100:9092``: 이 옵션은 SuperLinks Fleet의 주소를 지정합니다" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 -msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." -msgstr "" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "API에 연결할 수 있습니다. SuperLink IP로 업데이트하는 것을 잊지 마세요." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 -msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and the " -"data)." -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" +#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " +#~ "network `__를 생성하고 ``--network`` argument를 " +#~ "사용하고 SuperNodes를 실행할 Docker 네트워크의 이름을" +#~ " 전달하면 됩니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 -msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to makes " -"it look like a subclass of ``flwr.client.Client``, not ``flwr.client." -"NumPyClient``. In fact, the Flower core framework doesn't know how to handle " -"``NumPyClient``'s, it only knows how to handle ``Client``'s. ``NumPyClient`` " -"is just a convenience abstraction built on top of ``Client``." 
-msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" +#~ "태그 뒤에 오는 모든 argument는 Flower " +#~ "SuperNode 바이너리에 전달됩니다. SuperNode가 지원하는 " +#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 -msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on top " -"of ``Client``." -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 SuperNode 컨테이너에 마운트해야 합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" +#~ "SuperNode 이미지와 마찬가지로 ServerApp Docker " +#~ "이미지는 Flower의 사전 설치된 버전과 함께 제공되며," +#~ " 자체 ServerApp 이미지를 구축하기 위한 기본 " +#~ "역할을 합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 -msgid "" -"Let's try to do the same thing using ``Client`` instead of ``NumPyClient``." -msgstr "" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" +#~ "여기서는 Flower SuperNode 섹션에서와 동일한`quickstart-" +#~ "pytorch`` 예제를 사용하겠습니다. 아직 수행하지 않았다면 " +#~ "계속 진행하기 전에 `SuperNode Prerequisites`_ 을" +#~ " 따르세요." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 -msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta make " -"sure our new ``Client``-based client works, right?" -msgstr "" +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "ServerApp Dockerfile 만들기" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 -msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "먼저, ``ServerApp`` 코드가 있는 디렉토리에 Docker파일을" +#~ " 생성해야 합니다. ``quickstart-pytorch`` 예제를 " +#~ "사용하는 경우 ``examples/quickstart-pytorch``에 " +#~ "``Dockerfile.serverapp``이라는 새 파일을 생성합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 -msgid "" -"First of all, it's more code. But why? The difference comes from the fact " -"that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the network, " -"it eventually needs to turn these parameters into ``bytes``. Turning " -"parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. 
Flower needs to do both: it needs " -"to serialize parameters on the server-side and send them to the client, the " -"client needs to deserialize them to use them for local training, and then " -"serialize the updated parameters again to send them back to the server, " -"which (finally!) deserializes them again in order to aggregate them with the " -"updates received from other clients." -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "``Dockerfile.serverapp``에는 ServerApp 이미지를 합치는 지침이 포함되어 있습니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 -msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It can " -"do so because it expects you to return parameters as NumPy ndarray's, and it " -"knows how to handle these. This makes working with machine learning " -"libraries that have good NumPy support (most of them) a breeze." -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" +#~ "처음 두 줄에서는 ``1.8.0`` 태그가 붙은 " +#~ "ServerApp 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 마지막" +#~ " 두 줄에서는 ``server.py`` 모듈을 이미지에 복사하고" +#~ " ``server:app`` argument를 사용하여 진입점을 " +#~ "``flower-server-app``로 설정합니다. 인수는 ServerApp" +#~ " 컨테이너 내에서 실행될 ServerApp의 객체 " +#~ "참조(``:``)입니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 -msgid "" -"In terms of API, there's one major difference: all methods in Client take " -"exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return exactly " -"one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return " -"values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These ``*Ins`` " -"and ``*Res`` objects in ``Client`` wrap all the individual values you're " -"used to from ``NumPyClient``." -msgstr "" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 빌드" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 실행" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 -msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." -msgstr "" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "이제 ServerApp 이미지를 빌드했으니 이제 실행할 수 있습니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 -msgid "" -"But first what is serialization? 
Serialization is just the process of "
-"converting an object into raw bytes, and equally as important, "
-"deserialization is the process of converting raw bytes back into an object. "
-"This is very useful for network communication. Indeed, without "
-"serialization, you could not just send a Python object through the internet."
-msgstr ""
+#~ msgid ""
+#~ "``--superlink 192.168.1.100:9091``: This option "
+#~ "specifies the address of the SuperLinks"
+#~ " Driver"
+#~ msgstr "``--superlink 192.168.1.100:9091``: 이 옵션은 SuperLinks 드라이버의 주소를 지정합니다"
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516
-msgid ""
-"Federated Learning relies heavily on internet communication for training by "
-"sending Python objects back and forth between the clients and the server. "
-"This means that serialization is an essential part of Federated Learning."
-msgstr ""
+#~ msgid ""
+#~ "To test running Flower locally, you "
+#~ "can create a `bridge network "
+#~ "`__, use the ``--network`` argument"
+#~ " and pass the name of the "
+#~ "Docker network to run your ServerApps."
+#~ msgstr ""
+#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge "
+#~ "network `__,를 생성하고 ``--network`` argument를 "
+#~ "사용하여 ServerApp을 실행할 Docker 네트워크의 이름을 "
+#~ "전달하면 됩니다."
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518
-msgid ""
-"In the following section, we will write a basic example where instead of "
-"sending a serialized version of our ``ndarray``\\ s containing our "
-"parameters, we will first convert the ``ndarray`` into sparse matrices, "
-"before sending them. This technique can be used to save bandwidth: in "
-"certain cases where the weights of a model are sparse (containing many 0 "
-"entries), converting them to a sparse matrix can greatly reduce their "
-"byte size."
-msgstr ""
+#~ msgid ""
+#~ "Any argument that comes after the "
+#~ "tag is passed to the Flower "
+#~ "ServerApp binary. To see all available"
+#~ " flags that the ServerApp supports, "
+#~ "run:"
+#~ msgstr ""
+#~ "태그 뒤에 오는 모든 argument는 Flower "
+#~ "ServerApp 바이너리에 전달됩니다. ServerApp에서 지원하는 "
+#~ "사용 가능한 모든 플래그를 보려면 실행하세요:"
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521
-msgid "Our custom serialization/deserialization functions"
-msgstr ""
+#~ msgid ""
+#~ "To enable SSL, we will need to "
+#~ "mount a PEM-encoded root certificate "
+#~ "into your ServerApp container."
+#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 ServerApp 컨테이너에 마운트해야 합니다."
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523
-msgid ""
-"This is where the real serialization/deserialization will happen, especially "
-"in ``ndarray_to_sparse_bytes`` for serialization and "
-"``sparse_bytes_to_ndarray`` for deserialization."
-msgstr ""
+#~ msgid ""
+#~ "Assuming the certificate already exists "
+#~ "locally, we can use the flag "
+#~ "``--volume`` to mount the local "
+#~ "certificate into the container's ``/app/`` "
+#~ "directory. This allows the ServerApp to"
+#~ " access the certificate within the "
+#~ "container. Use the ``--root-certificates`` "
+#~ "flags when starting the container."
+#~ msgstr ""
+#~ "인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` "
+#~ "플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` "
+#~ "디렉터리에 마운트할 수 있습니다. 이렇게 하면 "
+#~ "ServerApp이 컨테이너 내의 인증서에 액세스할 수 "
+#~ "있습니다. 컨테이너를 시작할 때 ``--root-"
+#~ "certificates`` 플래그를 사용하세요."
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525
-msgid ""
-"Note that we imported the ``scipy.sparse`` library in order to convert our "
-"arrays."
-msgstr "" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "" +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_supernode `\\ \\(\\)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 -msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we will " -"just have to call our custom functions in our ``flwr.client.Client``." -msgstr "" +#~ msgid "d defaults to None." +#~ msgstr "d는 기본값이 None입니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 -msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` defined " -"above." -msgstr "" +#~ msgid "Update R from dict/iterable E and F." +#~ msgstr "dict/iterable E 및 F에서 R을 업데이트합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 -msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we need " -"to serialize our local results with ``ndarrays_to_sparse_parameters``." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 -msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters with " -"our custom function." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 -msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change the " -"serialization and deserialization here, we only need to reimplement the " -"``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other " -"functions of the strategy will be inherited from the super class ``FedAvg``." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 -msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" 
-msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 -msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine learning " -"libraries that have good NumPy interoperability. ``Client`` is a more " -"flexible abstraction that allows us to do things that are not possible in " -"``NumPyClient``. In order to do so, it requires us to handle parameter " -"serialization and deserialization ourselves." -msgstr "" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "" +#~ "클라이언트 앱의 특정 데이터 파티션을 로드할 때 " +#~ "사용할 수 있는 식별자입니다. 시뮬레이션을 수행할 때 " +#~ "이 식별자를 사용하는 것이 더 적절합니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), congratulations! " -"You're now well equipped to understand the rest of the documentation. There " -"are many topics we didn't cover in the tutorial, we recommend the following " -"resources:" -msgstr "" +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "" +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "클라이언트앱이 사용해야 하는 데이터 파티션을 알려주는 식별자입니다." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 -msgid "" -"`Check out Flower Code Examples `__" -msgstr "" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 -msgid "" -"`Use Flower Baselines for your research `__" -msgstr "" +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "Flower SuperLink(Driver API 및 Fleet API)를 실행하세요." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "" +#~ msgid "" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "" +#~ msgid ":py:obj:`flwr.server.strategy `\\" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In part " -"1, we use PyTorch for the model training pipeline and data loading. In part " -"2, we continue to federate the PyTorch-based pipeline using Flower." -msgstr "" +#~ msgid ":py:obj:`flwr.server.workflow `\\" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" 
-msgstr "" +#~ msgid "run\\_driver\\_api" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 -msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." -msgstr "" +#~ msgid "run\\_fleet\\_api" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower (``flwr``):" -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled (on " -"Google Colab: ``Runtime > Change runtime type > Hardware accelerator: GPU > " -"Save``). Note, however, that Google Colab is not always able to offer GPU " -"acceleration. If you see an error related to GPU availability in one of the " -"following sections, consider switching back to CPU-based execution by " -"setting ``DEVICE = torch.device(\"cpu\")``. If the runtime has GPU " -"acceleration enabled, you should see the output ``Training on cuda``, " -"otherwise it'll say ``Training on cpu``." -msgstr "" +#~ msgid "key shares." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -msgid "" -"Federated learning can be applied to many different types of tasks across " -"different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular CIFAR-10 " -"dataset. CIFAR-10 can be used to train image classifiers that distinguish " -"between images from ten different classes: 'airplane', 'automobile', 'bird', " -"'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and 'truck'." -msgstr "" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 -msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the " -"original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely for " -"experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the data " -"is naturally partitioned)." -msgstr "" +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -msgid "" -"Each organization will act as a client in the federated learning system. 
So " -"having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "" +#~ msgid "" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training and " -"test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" -msgstr "" +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten different " -"organizations. Each ``trainloader``/``valloader`` pair contains 4000 " -"training examples and 1000 validation examples. There's also a single " -"``testloader`` (we did not split the test set). Again, this is only " -"necessary for building research or educational systems, actual federated " -"learning systems have their data naturally distributed across multiple " -"partitions." -msgstr "" +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 -msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "" +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 -msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another batch " -"of images." -msgstr "" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "" +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 -msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ `__." -msgstr "" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 -msgid "" -"We use the simple CNN described in the `PyTorch tutorial `__:" -msgstr "" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "" +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 -msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train the " -"model on the dataset of one of our organizations (``trainloaders[0]``). This " -"simulates the reality of most machine learning projects today: each " -"organization has their own data and trains models only on this internal data:" -msgstr "" +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 -msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result in " -"a test set accuracy of about 41%, which is not good, but at the same time, " -"it doesn't really matter for the purposes of this tutorial. 
The intent was " -"just to show a simplistic centralized training pipeline that sets the stage " -"for what comes next - federated learning!" -msgstr "" +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "" +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 -msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was in " -"one place (i.e., a single ``trainloader`` and a single ``valloader``). Next, " -"we'll simulate a situation where we have multiple datasets in multiple " -"organizations and where we train a model over these organizations using " -"federated learning." -msgstr "" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "" +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 -msgid "" -"In federated learning, the server sends the global model parameters to the " -"client, and the client updates the local model with the parameters received " -"from the server. It then trains the model on the local data (which changes " -"the model parameters locally) and sends the updated/changed model parameters " -"back to the server (or, alternatively, it sends just the gradients back to " -"the server, not the full model parameters)." -msgstr "" +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 -msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the " -"local model: ``set_parameters`` and ``get_parameters``. The following two " -"functions do just that for the PyTorch model above." -msgstr "" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 -msgid "" -"The details of how this works are not really important here (feel free to " -"consult the PyTorch documentation if you want to learn more). In essence, we " -"use ``state_dict`` to access PyTorch model parameter tensors. 
The parameter " -"tensors are then converted to/from a list of NumPy ndarray's (which Flower " -"knows how to serialize/deserialize):" -msgstr "" +#~ msgid "receive the updated local model weights" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "" +#~ msgid "which can be implemented in the following way:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 -msgid "" -"With that out of the way, let's move on to the interesting part. Federated " -"learning systems consist of a server and multiple clients. In Flower, we " -"create clients by implementing subclasses of ``flwr.client.Client`` or " -"``flwr.client.NumPyClient``. We use ``NumPyClient`` in this tutorial because " -"it is easier to implement and requires us to write less boilerplate." -msgstr "" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 -msgid "" -"To implement the Flower client, we create a subclass of ``flwr.client." -"NumPyClient`` and implement the three methods ``get_parameters``, ``fit``, " -"and ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "" +#~ msgid "" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 -msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters to " -"the server" -msgstr "" +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 -msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the model " -"parameters on the local data, and return the evaluation result to the server" -msgstr "" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 -msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" -msgstr "" +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 -msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through " -"``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a " -"*single client* in our federated learning system. Federated learning systems " -"have multiple clients (otherwise, there's not much to federate), so each " -"client will be represented by its own instance of ``FlowerClient``. If we " -"have, for example, three clients in our workload, then we'd have three " -"instances of ``FlowerClient``. Flower calls ``FlowerClient.fit`` on the " -"respective instance when the server selects a particular client for training " -"(and ``FlowerClient.evaluate`` for evaluation)." -msgstr "" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 -msgid "" -"In this notebook, we want to simulate a federated learning system with 10 " -"clients on a single machine. This means that the server and all 10 clients " -"will live on a single machine and share resources such as CPU, GPU, and " -"memory. Having 10 clients would mean having 10 instances of ``FlowerClient`` " -"in memory. Doing this on a single machine can quickly exhaust the available " -"memory resources, even if only a subset of these clients participates in a " -"single round of federated learning." -msgstr "" +#~ msgid "" +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 -msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a function " -"called ``client_fn`` that creates a ``FlowerClient`` instance on demand. " -"Flower calls ``client_fn`` whenever it needs an instance of one particular " -"client to call ``fit`` or ``evaluate`` (those instances are usually " -"discarded after use, so they should not keep any local state). Clients are " -"identified by a client ID, or short ``cid``. The ``cid`` can be used, for " -"example, to load different local data partitions for different clients, as " -"can be seen below:" -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "" +#~ msgid "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 -msgid "" -"We now have the class ``FlowerClient`` which defines client-side training/" -"evaluation and ``client_fn`` which allows Flower to create ``FlowerClient`` " -"instances whenever it needs to call ``fit`` or ``evaluate`` on one " -"particular client. The last step is to start the actual simulation using " -"``flwr.simulation.start_simulation``." -msgstr "" +#~ msgid "Let's build a new ``Strategy`` from scratch!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 -msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the number " -"of clients to simulate (``num_clients``), the number of federated learning " -"rounds (``num_rounds``), and the strategy. The strategy encapsulates the " -"federated learning approach/algorithm, for example, *Federated Averaging* " -"(FedAvg)." -msgstr "" +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap everything in their own " +#~ "``DataLoader``. We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 -msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated " -"learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. 
The last step " -"is the actual call to ``start_simulation`` which - you guessed it - starts " -"the simulation:" -msgstr "" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "" +#~ msgid "" +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "" +#~ msgid "" +#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. " +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format -msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 clients " -"(``num_clients=10``). Flower then goes ahead an asks the ``FedAvg`` strategy " -"to select clients. ``FedAvg`` knows that it should select 100% of the " -"available clients (``fraction_fit=1.0``), so it goes ahead and selects 10 " -"random clients (i.e., 100% of 10)." -msgstr "" +#~ msgid "" +#~ "We've seen this before, there's nothing" +#~ " new so far. The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 -msgid "" -"Flower then asks the selected 10 clients to train the model. When the server " -"receives the model parameter updates from the clients, it hands those " -"updates over to the strategy (*FedAvg*) for aggregation. The strategy " -"aggregates those updates and returns the new global model, which then gets " -"used in the next round of federated learning." -msgstr "" +#~ msgid "" +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "" +#~ msgid "" +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 -msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` are " -"empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" 
-msgstr "" +#~ msgid "" +#~ "`Check out Flower Code Examples " +#~ "`__" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 -msgid "" -"Flower can automatically aggregate losses returned by individual clients, " -"but it cannot do the same for metrics in the generic metrics dictionary (the " -"one with the ``accuracy`` key). Metrics dictionaries can contain very " -"different kinds of metrics and even key/value pairs that are not metrics at " -"all, so the framework does not (and can not) know how to handle these " -"automatically." -msgstr "" +#~ msgid "" +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 -msgid "" -"As users, we need to tell the framework how to handle/aggregate these custom " -"metrics, and we do so by passing metric aggregation functions to the " -"strategy. The strategy will then call these functions whenever it receives " -"fit or evaluate metrics from clients. The two possible functions are " -"``fit_metrics_aggregation_fn`` and ``evaluate_metrics_aggregation_fn``." -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 -msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" -msgstr "" +#~ msgid "Loading the data" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 -msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" -msgstr "" +#~ msgid "" +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 -msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom " -"evaluation metrics and calculates a single ``accuracy`` metric across all " -"clients on the server side." -msgstr "" +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 -msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial will " -"cover centralized evaluation." 
-msgstr "" +#~ msgid "" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "" +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 -msgid "" -"Congratulations, you just trained a convolutional neural network, federated " -"over 10 clients! With that, you understand the basics of federated learning " -"with Flower. The same approach you've seen can be used with other machine " -"learning frameworks (not just PyTorch) and tasks (not just CIFAR-10 images " -"classification), for example NLP with Hugging Face Transformers or speech " -"with SpeechBrain." -msgstr "" +#~ msgid "" +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 -msgid "" -"In the next notebook, we're going to cover some more advanced concepts. Want " -"to customize your strategy? Initialize parameters on the server side? Or " -"evaluate the aggregated model on the server side? We'll cover all this and " -"more in the next tutorial." -msgstr "" +#~ msgid "" +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 -msgid "" -"The `Flower Federated Learning Tutorial - Part 2 `__ goes " -"into more depth about strategies and all the advanced things you can build " -"with them." -msgstr "" +#~ msgid "Defining the model" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "" +#~ msgid "Training the model" +#~ msgstr "" + +#~ msgid "" +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). 
This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and " -"Flower (`part 1 `__)." -msgstr "" +#~ msgid "" +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll begin to customize the federated learning system we " -"built in the introductory notebook (again, using `Flower `__ and `PyTorch `__)." -msgstr "" +#~ msgid "Updating model parameters" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "" +#~ msgid "" +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "" +#~ msgid "" +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 -msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of new " -"features." -msgstr "" +#~ msgid "Implementing a Flower client" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "" +#~ msgid "" +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 -msgid "" -"Flower, by default, initializes the global model by asking one random client " -"for the initial parameters. In many cases, we want more control over " -"parameter initialization though. Flower therefore allows you to directly " -"pass the initial parameters to the Strategy:" -msgstr "" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 -msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower " -"from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." -msgstr "" +#~ msgid "" +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "" +#~ msgid "" +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 -msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number of " -"arguments, amongst them the ``client_fn`` used to create ``FlowerClient`` " -"instances, the number of clients to simulate ``num_clients``, the number of " -"rounds ``num_rounds``, and the strategy." -msgstr "" +#~ msgid "" +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. " +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 -msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different strategy " -"this time:" -msgstr "" +#~ msgid "Using the Virtual Client Engine" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "" +#~ msgid "" +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. 
This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 -msgid "" -"Flower can evaluate the aggregated model on the server-side or on the client-" -"side. Client-side and server-side evaluation are similar in some ways, but " -"different in others." -msgstr "" +#~ msgid "" +#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 -msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly aggregated " -"model after each round of training without having to send the model to " -"clients. We're also fortunate in the sense that our entire evaluation " -"dataset is available at all times." -msgstr "" +#~ msgid "Starting the training" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 -msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, but " -"also more powerful: it doesn't require a centralized dataset and allows us " -"to evaluate models over a larger set of data, which often yields more " -"realistic evaluation results. In fact, many scenarios require us to use " -"**Federated Evaluation** if we want to get representative evaluation results " -"at all. But this power comes at a cost: once we start to evaluate on the " -"client side, we should be aware that our evaluation dataset can change over " -"consecutive rounds of learning if those clients are not always available. " -"Moreover, the dataset held by each client can also change over consecutive " -"rounds. This can lead to evaluation results that are not stable, so even if " -"we would not change the model, we'd see our evaluation results fluctuate " -"over consecutive rounds." 
-msgstr "" +#~ msgid "" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 -msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see how " -"we can evaluate aggregated model parameters on the server-side:" -msgstr "" +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "" +#~ msgid "" +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 -msgid "" -"In some situations, we want to configure client-side execution (training, " -"evaluation) from the server-side. One example for that is the server asking " -"the clients to train for a certain number of local epochs. Flower provides a " -"way to send configuration values from the server to the clients using a " -"dictionary. Let's look at an example where the clients receive values from " -"the server through the ``config`` parameter in ``fit`` (``config`` is also " -"available in ``evaluate``). The ``fit`` method receives the configuration " -"dictionary through the ``config`` parameter and can then read values from " -"this dictionary. In this example, it reads ``server_round`` and " -"``local_epochs`` and uses those values to improve the logging and configure " -"the number of local training epochs:" -msgstr "" +#~ msgid "" +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 -msgid "" -"So how can we send this config dictionary from server to clients? The built-" -"in Flower Strategies provide way to do this, and it works similarly to the " -"way server-side evaluation works. 
We provide a function to the strategy, and " -"the strategy calls this function for every round of federated learning:" -msgstr "" +#~ msgid "" +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 -msgid "" -"Next, we'll just pass this function to the FedAvg strategy before starting " -"the simulation:" -msgstr "" +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 -msgid "" -"As we can see, the client logs now include the current round of federated " -"learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second " -"round of federated learning, and then for two epochs during the third round." -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 -msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and used " -"this concept throughout this notebook without mentioning it explicitly: our " -"``FlowerClient`` returns a dictionary containing a custom key/value pair as " -"the third return value in ``evaluate``." -msgstr "" +#~ msgid "Let's move beyond FedAvg with Flower strategies!" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "" +#~ msgid "" +#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 -msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "" +#~ msgid "" +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format -msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. 
Given that the number of training examples on each client is quite " -"small, we should probably train the model a bit longer, so we configure the " -"clients to perform 3 local training epochs. We should also adjust the " -"fraction of clients selected for training during each round (we don't want " -"all 1000 clients participating in every round), so we adjust " -"``fraction_fit`` to ``0.05``, which means that only 5% of available clients " -"(so 50 clients) will be selected for training each round:" -msgstr "" +#~ msgid "" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 -msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "" +#~ msgid "" +#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 -msgid "" -"In the later sections, we've seen how we can communicate arbitrary values " -"between server and clients to fully customize client-side execution. With " -"that capability, we built a large-scale Federated Learning simulation using " -"the Flower Virtual Client Engine and ran an experiment involving 1000 " -"clients in the same workload - all in a Jupyter Notebook!" -msgstr "" +#~ msgid "" +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -msgid "" -"The `Flower Federated Learning Tutorial - Part 3 `__ shows how " -"to build a fully custom ``Strategy`` from scratch." -msgstr "" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "|93b02017c78049bbbd5ae456dcb2c91b|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "연합 학습이란 무엇입니까?" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "|01471150fd5144c080a176b43e92a3ff|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated learning " -"systems that approach the current state of the art in the field." 
-msgstr "" -"이 튜토리얼에서 연합 학습이 무엇인지 배우고 Flower로 첫 번째 시스템을 " -"구축하고 점진적으로 확장해 나갈 것입니다. 본 튜토리얼의 모든 부분을 완성할 " -"수 있다면, 당신은 고급 연합 학습 시스템을 구축하여 그 분야의 현재 최고 기술 " -"수준에 접근할 수 있을 것입니다." +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "|9bc21c7dbd17444a8f070c60786e3484|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 -msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with federated " -"learning. Only a basic understanding of data science and Python programming " -"is assumed." -msgstr "" -"🧑‍🏫이 튜토리얼은 사전 지식을 많이 필요로 하지 않으며 연합 학습에 대해 " -"상세히알 필요는 없습니다. 데이터 과학과 파이썬 프로그래밍에 대한 기본적인 " -"이해만 가정합니다." +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "|3047bbce54b34099ae559963d0420d79|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join the " -"open-source Flower community on Slack to connect, ask questions, and get " -"help: `Join Slack `__ 🌼 We'd love to hear " -"from you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." -msgstr "" -"`Star Flower on GitHub `__ ⭐️ Slack의 " -"오픈소스 Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 " -"있습니다: `Slack 가입`__ 🌼 ``#introductions``" -"채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 있으시면``#questions`` " -"채널로 방문해 주시기 바랍니다." +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "|e9f8ce948593444fb838d2f354c7ec5d|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "전통적인 머신러닝(기계학습)" +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "|c24c1478b30e4f74839208628a842d1e|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 -msgid "" -"Before we begin to discuss federated learning, let us quickly recap how most " -"machine learning works today." -msgstr "연합 학습에 대해 논의하기 전에 현재 대부분의 머신러닝이 어떻게 작동하는지 " -"간략히 요약하겠습니다." +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "|1b3613d7a58847b59e1d3180802dbc09|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 -msgid "" -"In machine learning, we have a model, and we have data. The model could be a " -"neural network (as depicted here), or something else, like classical linear " -"regression." -msgstr "" -"머신러닝에서 우리는 모델과 데이터를 가지고 있습니다. 모델은 신경망(그림과 " -"같이)일 수도 있고 고전적인 선형 회귀와 같은 다른 것일 수도 있습니다." +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "|9980b5213db547d0b8024a50992b9e3f|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" -msgstr "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "|c7afb4c92d154bfaa5e8cb9a150e17f1|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "모델과 데이터" +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "|032eb6fed6924ac387b9f13854919196|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 -msgid "" -"We train the model using the data to perform a useful task. A task could be " -"to detect objects in images, transcribe an audio recording, or play a game " -"like Go." -msgstr "" -"우리는 유용한 작업을 수행하기 위해 데이터를 사용하여 모델을 훈련합니다. " -"작업은 이미지 속 물체를 감지하거나 음성 녹음을 기록하거나 바둑과 같은 게임을 " -"하는 것일 수 있습니다." 
+#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "|fbf225add7fd4df5a9bf25a95597d954|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" -msgstr "|01471150fd5144c080a176b43e92a3ff|" +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "|7efbe3d29d8349b89594e8947e910525|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "데이터를 이용한 모델 훈련" +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgstr "|329fb3c04c744eda83bb51fa444c2266|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -msgid "" -"Now, in practice, the training data we work with doesn't originate on the " -"machine we train the model on. It gets created somewhere else." -msgstr "실제로 우리가 사용하는 훈련 데이터는 모델을 훈련시키는 기계에서 비롯된 것이 " -"아닙니다. 그 데이터는 다른 곳에서 만들어졌습니다." +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." -msgstr "" -"스마트폰에서 사용자와 앱의 상호 작용, 센서 데이터를 수집하는 자동차, " -"키보드를 통해 입력을 받는 노트북 또는 누군가 노래를 부르리는 것을 듣는 " -"스마트 스피커에서 비롯됩니다." +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" -msgstr "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgid "run\\_supernode" +#~ msgstr "run\\_supernode" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "핸드폰에 있는 데이터" +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 -msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running " -"the same app. But it could also be several organizations, all generating " -"data for the same task." -msgstr "" -"또한 중요한 것은 이 \"다른 곳\"이 보통 한 곳만 아니라 여러 곳이라는 " -"것입니다. 같은 앱을 실행하는 여러 기기일 수도 있습니다. 하지만 여러 조직이 " -"모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다." +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" -msgstr "|3047bbce54b34099ae559963d0420d79|" +#~ msgid "the string key as the query for the layout." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "데이터가 여러 장치에 있습니다" +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 -msgid "" -"So to use machine learning, or any kind of data analysis, the approach that " -"has been used in the past was to collect all data on a central server. This " -"server can be somewhere in a data center, or somewhere in the cloud." -msgstr "" -"따라서 머신러닝이나 어떤 종류의 데이터 분석을 이용하려면 과거에는 중앙 " -"서버에서 모든 데이터를 수집하는 방법이 사용되었습니다. 이 서버는 데이터 센터 " -"어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." 
+#~ msgid "run\\_server\\_app" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" -msgstr "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgid "run\\_superlink" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "중앙 데이터 수집" +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." -msgstr "" -"모든 데이터가 한 곳에 모이면, 우리는 궁극적으로 머신러닝 알고리즘을 사용하여 " -"데이터에서 모델을 훈련시킬 수 있습니다. 이것이 바로 우리가 기본적으로 사용해 " -"온 머신러닝 방법입니다." +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" -msgstr "|c24c1478b30e4f74839208628a842d1e|" +#~ msgid "The total number of clients in this simulation." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "중앙 데이터 훈련" +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "클래식 머신러닝의 어려움" +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 -msgid "" -"The classic machine learning approach we've just seen can be used in some " -"cases. Great examples include categorizing holiday photos, or analyzing web " -"traffic. Cases, where all the data is naturally available on a centralized " -"server." -msgstr "" -"우리가 방금 본 전통적 머신러닝의 접근 방식은 경우에 따라 다르게 사용될 수 " -"있습니다. 좋은 예로는 휴일 사진을 분류하거나 웹 트래픽을 분석하는 것이 " -"있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" -msgstr "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "집중화 가능" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 -msgid "" -"But the approach can not be used in many other cases. Cases, where the data " -"is not available on a centralized server, or cases where the data available " -"on one server is not enough to train a good model." -msgstr "" -"그러나 이 방법은 다른 많은 경우에 적용되지 않을 수 있습니다. 예를 들어, 중앙 " -"집중식 서버에 데이터가 없거나 서버의 데이터가 좋은 모델을 훈련하기에 " -"충분하지 않을 수 있습니다." +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|9980b5213db547d0b8024a50992b9e3f|" -msgstr "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "집중화 불가능" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -msgid "" -"There are many reasons why the classic centralized machine learning approach " -"does not work for a large number of highly important real-world use cases. " -"Those reasons include:" -msgstr "" -"전통적인 중앙 집중식 머신러닝 방법이 현실 세계에서 매우 중요한 수많은 사용 " -"사례를 충족시킬 수 없는 이유가 있습니다. 이유는 다음과 같은 여러 가지가 " -"있습니다:" +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own users' " -"data for artificial intelligence training because those users live in " -"different parts of the world, and their data is governed by different data " -"protection regulations." 
-msgstr "" -"**규정**: GDPR (유럽), CCPA (캘리포니아), PIPEDA (캐나다), LGPD (브라질), " -"PDPL (아르헨티나), KVKK (터키), POPI (남아프리카공화국), FSS (러시아), CDPR " -"(중국), PDPB (인도), PIPA (한국), APPI (일본), PDP (인도네시아), PDPA " -"(싱가포르), APP (호주)등의 법규로 민감한 데이터가 이동하지 않도록 보호하고 " -"있습니다. 실제로 이러한 규정은 사용자가 세계의 다른 지역에 살고 데이터가 " -"다른 데이터 보호 규정에 의해 통제되기 때문에 단일 조직이 자체 사용자 " -"데이터를 인공 지능 학습에 사용하는 것을 방지하기도 합니다." +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 -msgid "" -"**User preference**: In addition to regulation, there are use cases where " -"users just expect that no data leaves their device, ever. If you type your " -"passwords and credit card info into the digital keyboard of your phone, you " -"don't expect those passwords to end up on the server of the company that " -"developed that keyboard, do you? In fact, that use case was the reason " -"federated learning was invented in the first place." -msgstr "" -"**사용자 선호도**: 규정 외에도 일부 사용 사례에서 사용자는 데이터가 자기 " -"장치를 떠나지 않기를 예상합니다. 휴대폰의 디지털 키보드에 비밀번호와 " -"신용카드 정보를 입력하면 비밀번호가 해당 키보드를 개발한 회사의 서버에 뜨길 " -"원하지는 않겠죠? 사실, 이 사용 사례가 애당초 연합 학습이 발명된 이유였습니다." +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data volume " -"that it is neither feasible nor economic to collect all the data (due to, " -"for example, bandwidth or communication efficiency). Think about a national " -"rail service with hundreds of train stations across the country. If each of " -"these train stations is outfitted with a number of security cameras, the " -"volume of raw on-device data they produce requires incredibly powerful and " -"exceedingly expensive infrastructure to process and store. And most of the " -"data isn't even useful." -msgstr "" -"**데이터 볼륨**: 일부 센서(예:카메라)는 너무 많은 데이터 볼륨을 생성하여 " -"모든 데이터를 수집하는 것이 실현 가능하지도 않고 경제적이지도 않습니다(예: " -"대역폭 또는 통신 효율로 인해). 전국에 수백 개 기차역이 있는 국가 철도 " -"서비스를 생각해 보세요. 각 기차역에 수 많은 보안 카메라가 설치되어 있다면, " -"그들이 생산하는 대량의 미가공 된 온디바이스 데이터는 처리 및 저장을 위해 " -"엄청나게 강력하고 매우 비싼기반 구조를 필요로 합니다. 그런데 대부분의 " -"데이터는 유용하지도 않습니다." +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "중앙 집중식 머신러닝이 작동하지 않는 예는 다음과 같습니다:" +#~ msgid "**hist** -- Object containing metrics from training." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "여러 병원의 민감한 의료기록으로 암 검진 모델 훈련" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 -msgid "" -"Financial information from different organizations to detect financial fraud" -msgstr "금융 사기를 탐지하기 위한 다양한 조직의 금융 정보" +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "더 나은 범위 예측을 위해 전기 자동차의 위치 데이터" +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "더 나은 자동 완성 모델을 훈련시키기 위한 엔드 투 엔드 암호화 된 메시지" +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -msgid "" -"The popularity of privacy-enhancing systems like the `Brave `__ browser or the `Signal `__ messenger shows " -"that users care about privacy. In fact, they choose the privacy-enhancing " -"version over other alternatives, if such an alternative exists. But what can " -"we do to apply machine learning and data science to these cases to utilize " -"private data? After all, these are all areas that would benefit " -"significantly from recent advances in AI." -msgstr "" -"`Brave `__ 브라우저나 `Signal `__ " -"메신저와 같은 개인 정보 보호 시스템의 인기는 사용자들이 개인 정보 보호에 " -"신경 쓴다는 것을 보여줍니다. 실제로 그러한 대안이 존재하는 경우 다른 " -"대안보다 개인 정보 보호 강화 버전을 선택합니다. 그런데 이러한 사례에 " -"머신러닝 및 데이터 과학을 적용하여 프라이버시 데이터를 활용하려면 어떻게 " -"해야 합니까? 이 모든 분야는 최근 AI의 발전으로 상당한 이익을 얻을 수 있는 " -"분야입니다." +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "연합 학습" +#~ msgid "" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 -msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead of " -"moving the data to the training. Here's the single-sentence explanation:" -msgstr "" -"연합 학습은 이 방법을 쉽게 뒤집었습니다. 데이터를 컴퓨팅 센터로 옮기는 대신 " -"컴퓨팅 능력을 데이터가 생성되는 장소로 이동 시킴으로써 분산된 데이터에서 " -"머신러닝을 실현합니다. 
요약하자면:" +#~ msgid "Dependencies" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "중앙 집중식 머신러닝: 데이터를 컴퓨팅 센터로 이동" +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "연합(기계)학습: 컴퓨팅을 데이터로 옮김" +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 -msgid "" -"By doing so, it enables us to use machine learning (and other data science " -"approaches) in areas where it wasn't possible before. We can now train " -"excellent medical AI models by enabling different hospitals to work " -"together. We can solve financial fraud by training AI models on the data of " -"different financial institutions. We can build novel privacy-enhancing " -"applications (such as secure messaging) that have better built-in AI than " -"their non-privacy-enhancing alternatives. And those are just a few of the " -"examples that come to mind. As we deploy federated learning, we discover " -"more and more areas that can suddenly be reinvented because they now have " -"access to vast amounts of previously inaccessible data." -msgstr "" -"이를 통해 이전에는 불가능했던 분야에서 머신러닝(및 기타 데이터 과학 방법)을 " -"사용할 수 있습니다. 이제 다양한 병원이 협력할 수 있도록 함으로써 우수한 의료 " -"AI 모델을 훈련할 수 있습니다. 다양한 금융 기관의 데이터에 대한 AI 모델을 " -"훈련하여 금융 사기를 해결할 수 있습니다. 개인 정보 보호를 강화하지 않는 " -"대안보다 더 나은 AI가 내장된 새로운 개인 정보 보호 강화 애플리케이션(예: " -"보안 메시징)을 구축할 수 있습니다. 그것들은 떠오르는 몇 가지 예에 " -"불과합니다. 연합 학습을 구축함에 따라 이전에 액세스할 수 없었던 많은 " -"데이터에 액세스할 수 있게 되었기 때문에 갑자기 재생될 수 있는 영역이 점점 더 " -"많아지고 있습니다." +#~ msgid "Handling the data" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an intuitive " -"explanation." -msgstr "그렇다면 연합 학습은 어떻게 작동합니까? 직관적인 설명부터 시작하겠습니다." +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "연합 학습의 5단계" +#~ msgid "Training and testing the model" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "0단계: 글로벌 모델 초기화" +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 -msgid "" -"We start by initializing the model on the server. This is exactly the same " -"in classic centralized learning: we initialize the model parameters, either " -"randomly or from a previously saved checkpoint." -msgstr "" -"서버에서 모델을 초기화하는 것으로 시작합니다. 이것은 전통적인 중앙 집중식 " -"학습과도 동일합니다: 임의로 또는 이전에 저장된 체크포인트에서 모델 " -"매개변수를 초기화합니다." 
+#~ msgid "Creating the model itself" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" -msgstr "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "글로벌 모델 초기화" +#~ msgid "Federating the example" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 -msgid "" -"Step 1: Send model to a number of connected organizations/devices (client " -"nodes)" -msgstr "1단계: 연결된 여러 조직/장치(클라이언트 노드)에 모델 전송" +#~ msgid "Creating the IMDBClient" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts their " -"local training using the same model parameters. We often use only a few of " -"the connected nodes instead of all nodes. The reason for this is that " -"selecting more and more client nodes has diminishing returns." -msgstr "" -"다음으로 글로벌 모델의 파라미터를 연결된 클라이언트 노드(예: 스마트폰과 같은 " -"에지 디바이스 또는 조직에 속한 서버)로 보냅니다. 이것은 각 참여 노드가 " -"동일한 모델 매개변수를 사용하여 로컬 훈련을 시작하도록 하기 위함입니다. " -"일반적으로 모든 노드가 아닌 몇 개의 연결 노드만 사용합니다. 그 이유는 점점 " -"더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" -msgstr "|032eb6fed6924ac387b9f13854919196|" +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "글로벌 모델 전송" +#~ msgid "Starting the server" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 -msgid "" -"Step 2: Train model locally on the data of each organization/device (client " -"node)" -msgstr "2단계: 각 조직/장치(클라이언트 노드)의 데이터에 대해 로컬로 모델 훈련" +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. 
Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 -msgid "" -"Now that all (selected) client nodes have the latest version of the global " -"model parameters, they start the local training. They use their own local " -"dataset to train their own local model. They don't train the model until " -"full convergence, but they only train for a little while. This could be as " -"little as one epoch on the local data, or even just a few steps (mini-" -"batches)." -msgstr "" -"이제 모든(선택된) 클라이언트 노드에는 최신 버전의 글로벌 모델 파라미터가 " -"있으며 로컬 훈련을 시작합니다. 그들은 자신의 로컬 데이터 세트를 사용하여 " -"자신의 로컬 모델을 훈련합니다. 모델이 완전히 수렴할 때까지 훈련하지 않고 " -"잠시만 훈련합니다. 이는 로컬 데이터에서 한 단계 정도로 짧거나 몇 단계(mini-" -"batches)에 불과할 수 있습니다." +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" -msgstr "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgid "Putting everything together" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "로컬 데이터에 대한 훈련" +#~ msgid "We can now start client instances using:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "3단계: 모델 파라미터를 업데이트하여 서버로 되돌리기" +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 -msgid "" -"After local training, each client node has a slightly different version of " -"the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the server. " -"The model updates they send can either be the full model parameters or just " -"the gradients that were accumulated during local training." -msgstr "" -"로컬 훈련 후에는 클라이언트 노드마다 원래 받은 모델 파라미터의 버전이 조금씩 " -"다릅니다. 파라미터가 다른 이유는 각 클라이언트 노드의 로컬 데이터 세트에 " -"다른 데이터가 있기 때문입니다. 그런 다음 클라이언트 노드는 이러한 모델 " -"업데이트를 서버로 다시 보냅니다. 보내는 모델 업데이트는 전체 모델 " -"파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" -msgstr "|7efbe3d29d8349b89594e8947e910525|" +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "모델 업데이트 전송" +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "4단계: 모델 업데이트를 새 글로벌 모델로 집계" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 -msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of the " -"original global model, each trained on the local data of one client. But " -"didn't we want to have one model that contains the learnings from the data " -"of all 100 client nodes?" -msgstr "" -"서버는 선택된 클라이언트 노드들로부터 모델 업데이트들을 수신합니다. 서버가 " -"100개의 클라이언트 노드를 선택했다면 이제 각각 클라이언트의 로컬 데이터를 " -"기반으로 훈련된 100개의 조금씩 다른 원래 글로벌 모델 버전을 갖게 됩니다. " -"하지만 우리는 100개의 모든 클라이언트 노드의 데이터에서 학습한 내용을 " -"포함하는 모델을 하나만 갖고 싶지 않았습니까?" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 -msgid "" -"In order to get one single model, we have to combine all the model updates " -"we received from the client nodes. This process is called *aggregation*, and " -"there are many different ways to do it. The most basic way to do it is " -"called *Federated Averaging* (`McMahan et al., 2016 `__), often abbreviated as *FedAvg*. *FedAvg* takes the 100 " -"model updates and, as the name suggests, averages them. To be more precise, " -"it takes the *weighted average* of the model updates, weighted by the number " -"of examples each client used for training. The weighting is important to " -"make sure that each data example has the same \"influence\" on the resulting " -"global model. If one client has 10 examples, and another client has 100 " -"examples, then - without weighting - each of the 10 examples would influence " -"the global model ten times as much as each of the 100 examples." -msgstr "" -"단일 모델 하나를 얻으려면 클라이언트 노드에서 받은 모든 모델 업데이트를 " -"결합해야 합니다. 이 과정이 *집합*라고 하며 여러 가지 방법이 있습니다. 가장 " -"기본적인 방법은*Federated Averaging* (`McMahan et al., 2016 `__)이라고 하고 보통 줄여서 *FedAvg*로 표기합니다. " -"*FedAvg* 는 100개의 모델 업데이트를 받아 이름에서 알 수 있듯이 모델 " -"업데이트를 평균화합니다. 더 정확히 말하면, 모델 업데이트의 *가중 평균* 을 각 " -"클라이언트가 훈련에 사용한 예제 수에 따라 가중치를 부여합니다. 가중치는 각 " -"데이터 예제가 결과 글로벌 모델에 동일한 \"영향\" 을 미치는지 확인하는 데 " -"중요합니다. 한 클라이언트에 10개의 데이터 포인트가 있고 다른 클라이언트에 " -"100개의 데이터 포인트가 있다면 가중치를 부여하지 않고 10개의 예가 100개의 " -"사례보다 글로벌 모델에 10배 더 많은 영향을 미칩니다." +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|329fb3c04c744eda83bb51fa444c2266|" -msgstr "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "모델 업데이트 집계" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "5단계: 모델이 수렴할 때까지 1~4단계를 반복합니다" +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step 1), " -"the client nodes train on their local data (step 2), they send their updated " -"models to the server (step 3), and the server then aggregates the model " -"updates to get a new version of the global model (step 4)." -msgstr "" -"단계 1에서 4는 우리가 말하는 단일 라운드 연합 학습입니다. 글로벌 모델 " -"파라미터는 참여하는 클라이언트 노드에 전송되고(1단계), 클라이언트 노드는 " -"로컬 데이터에 대한 훈련을 받고(2단계), 업데이트된 모델을 서버에 " -"전송하고(3단계), 서버는 모델 업데이트를 집계하여 글로벌 모델의 새로운 버전을 " -"얻습니다(4단계)." +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that iteration " -"only trains for a little while. This means that after the aggregation step " -"(step 4), we have a model that has been trained on all the data of all " -"participating client nodes, but only for a little while. We then have to " -"repeat this training process over and over again to eventually arrive at a " -"fully trained model that performs well across the data of all client nodes." -msgstr "" -"한 라운드의 반복에서 해당 반복에 참여하는 각 클라이언트 노드는 짧은 시간 " -"동안만 훈련합니다. 집계 단계(4단계) 이후 우리 모델이 관련된 모든 클라이언트 " -"노드의 모든 데이터에 대해 잠시 동안만 훈련되었음을 의미합니다. 그런 다음 " -"모든 클라이언트 노드의 데이터에서 잘 작동하는 완전히 훈련된 모델에 " -"도달하려면 이 훈련 과정을 계속 반복해야 합니다." +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning in " -"a nutshell. In later parts of this tutorial, we will go into more detail. " -"Interesting questions include: How can we select the best client nodes that " -"should participate in the next round? What's the best way to aggregate model " -"updates? How can we handle failing client nodes (stragglers)?" -msgstr "" -"축하합니다, 이제 연합 학습의 기초에 대해 알게 되었습니다. 물론 아직 논의해야 " -"할 내용이 많지만 이는 연합 학습의 축소판일 뿐입니다. 본 튜토리얼의 " -"후반부에는 좀 더 자세히 설명하겠습니다. 흥미로운 질문은 다음과 같습니다: " -"다음 라운드에 참여해야 할 가장 좋은 클라이언트 노드를 어떻게 선택할 수 " -"있을까요? 모델 업데이트를 집계하는 가장 좋은 방법은 무엇일까요? 실패한 " -"클라이언트 노드(낙오자)를 어떻게 처리할 수 있을까요?" +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different client " -"nodes, we can also evaluate the model on that data to receive valuable " -"metrics. This is called federated evaluation, sometimes abbreviated as FE. " -"In fact, federated evaluation is an integral part of most federated learning " -"systems." -msgstr "" -"다양한 클라이언트 노드의 분산된 데이터에 대해 모델을 훈련할 수 있는 것처럼 " -"해당 데이터에 대한 모델을 평가하여 가치 있는 메트릭(metrics)을 받을 수도 " -"있습니다. 이를 연합 평가라고 하며 FE라고 약칭하기도 합니다. 사실 연합 평가는 " -"대부분의 연합 학습 시스템에서 필수적인 부분입니다." +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "연합 분석" +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from data. " -"Data analysis can yield valuable insights, but again, there's often not " -"enough data to get a clear answer. What's the average age at which people " -"develop a certain type of health condition? Federated analytics enables such " -"queries over multiple client nodes. It is usually used in conjunction with " -"other privacy-enhancing technologies like secure aggregation to prevent the " -"server from seeing the results submitted by individual client nodes." -msgstr "" -"많은 경우 머신러닝은 데이터로부터 가치를 얻기 위한 필수 조건이 아닙니다. " -"데이터 분석을 통해 귀중한 통찰력을 얻을 수 있지만, 명확한 답변을 얻기에는 " -"데이터가 충분하지 않은 경우가 많습니다. 특정 유형의 건강 상태가 발생하는 " -"평균 연령은 몇 살입니까? 연합 분석을 사용하면 여러 클라이언트 노드에서 " -"이러한 쿼리(query)를 실행할 수 있습니다. 서버가 단일 클라이언트 노드에서 " -"제출한 결과를 보지 못하도록 보안을 강화한 집합 방식과 같은 다른 프라이버시 " -"향상 기술과 함께 자주 사용됩니다." +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and sharing " -"statistical data, ensuring the privacy of individual participants. DP " -"achieves this by adding statistical noise to the model updates, ensuring any " -"individual participants’ information cannot be distinguished or re-" -"identified. This technique can be considered an optimization that provides a " -"quantifiable privacy protection measure." -msgstr "" -"차분 프라이버시(Differential Privacy)는 연합 학습의 맥락에서 종종 " -"언급됩니다. 통계 데이터를 분석하고 공유할 때 사용하는 프라이버시 보호 " -"방식으로, 참가자 개인의 프라이버시를 보장합니다. 차분 프라이버시는 모델 " -"업데이트에 통계적 잡음(noise)를 추가하여 개별 참가자의 정보를 구별하거나 " -"재식별할 수 없도록 함으로써 이를 달성합니다. 이 기술은 정량적 개인 정보 보호 " -"조치를 제공하는 최적화라고 볼 수 있습니다." 
+#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Flower" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require " -"infrastructure to move machine learning models back and forth, train and " -"evaluate them on local data, and then aggregate the updated models. Flower " -"provides the infrastructure to do exactly that in an easy, scalable, and " -"secure way. In short, Flower presents a unified approach to federated " -"learning, analytics, and evaluation. It allows the user to federate any " -"workload, any ML framework, and any programming language." -msgstr "" -"연합 학습, 연합 평가 및 연합 분석은 머신러닝 모델을 앞뒤로 이동하고 로컬 " -"데이터에 대해 훈련 및 평가한 다음 업데이트된 모델을 통합하기 위한 기본 " -"프레임워크가 필요합니다. Flower가 제공하는 기반 구조는 간단하고 확장 " -"가능하며 안전한 방식으로 이러한 목표를 달성합니다. 간단히 말해서, Flower는 " -"연합 학습, 분석 및 평가를 위한 통합 접근 방식을 제공합니다. 이를 통해 " -"사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 " -"수 있습니다." +#~ msgid "Each client will have its own dataset." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" -msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal " -"computer, roomba, and phone)" -msgstr "Flower 연합 학습 서버 및 클라이언트 노드(자동차, 스쿠터, 개인용 컴퓨터, " -"룸바, 전화)" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and how " -"it relates to the classic (centralized) machine learning!" -msgstr "축하합니다, 지금까지 당신은 연합 학습의 기본 지식과 그것이 어떻게 전통적 (" -"중앙 집중식) 머신러닝과 관련되는지 배웠습니다!" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first federated " -"learning system with Flower." -msgstr "이 튜토리얼의 다음 부분에서는 Flower와 함께 첫 번째 연합 학습 시스템을 " -"구축할 것입니다." 
+#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 `__ shows how to " -"build a simple federated learning system with PyTorch and Flower." -msgstr "" -"`Flower 연합 학습 튜토리얼- 1부 `__ PyTorch와 Flower를 사용하여 간단한 " -"연합 학습 시스템을 구축하는 방법을 보여줍니다." +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#~ msgid "" -#~ "Currently, Flower provides two images, a ``base`` image and a " -#~ "``superlink`` image. The base image, as the name suggests, contains basic " -#~ "dependencies that the SuperLink needs. This includes system dependencies, " -#~ "Python and Python tools. The SuperLink image is based on the base image, " -#~ "but it additionally installs the SuperLink using ``pip``." +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ "현재, Flower는 \"base\" 이미지 그리고 \"superlink\" 이미지를 제공합니다. " -#~ "base 이미지는 이름에서 알 수 있듯이 SuperLink가 필요로 하는 기본 " -#~ "dependencies를 포함하고 있습니다. 여기에는 시스템 dependencies, Python 및 " -#~ "Python 도구가 포함됩니다. SuperLink 이미지는 base 이미지를 기반으로 하지" -#~ "만 \"pip\"을 사용하여 SuperLink를 추가로 설치합니다." -#~ msgid "``3.11``" -#~ msgstr "``3.11``" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" -#~ msgid "Defaults to ``22.04``." -#~ msgstr "``22.04``이 기본값." +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "``flwr/base``이 기본값." +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" -#~ msgid "The Python version of the base image." -#~ msgstr "base 이미지의 Python 버전." +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "``py3.11``이 기본값." +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "``ubuntu22.04``이 기본값." +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "``flwr``이 기본값." +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" -#~ msgid "" -#~ "The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember " -#~ "that the build arguments as well as the name and tag can be adapted to " -#~ "your needs. These values serve as examples only." +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" -#~ "이미지의 이름은 ``flwr_superlink``이고 태그는 ``0.1.0``입니다. 필요에 따" -#~ "라 빌드 argument들 뿐만 아니라 이름과 태그도 정할 수 있습니다. 이 값들은 " -#~ "예시일 뿐입니다." 
+ diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index e50c290432cc..44223940cdce 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" "PO-Revision-Date: 2024-05-25 11:09+0000\n" "Last-Translator: Gustavo Bertoli \n" "Language: pt_BR\n" @@ -17,52 +17,198 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Arquitetura do Flower" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 +msgid "" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +msgid "Flower public API" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. 
Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "Engine do Edge Client" +#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +msgid "Flower public API of private packages" msgstr "" -"`Flower `_ arquitetura principal do framework com " -"Engine do Edge Client" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "Engine do Virtual Client" +#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:100 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_ arquitetura principal do framework com " -"Engine do Virtual Client" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." msgstr "" -"Engine do Virtual Client e do Edge Client no mesma carga de trabalho " -"(workload)" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -"`Flower `_ arquitetura principal do framework com " -"ambas engines do Virtual Client e do Edge Client" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "Como construir imagens Docker do Flower localmente" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -92,24 +238,15 @@ msgstr "" "ambiente de desenvolvimento local." #: ../../source/contributor-how-to-build-docker-images.rst:12 -msgid "Clone the flower repository." +#, fuzzy +msgid "Clone the ``flower`` repository." msgstr "Clone o repositório do flower." 
#: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 msgid "Verify the Docker daemon is running." msgstr "Verifique que o serviço Docker está rodando." #: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" -"Por favor, siga a primeira seção em :doc:`Execute o Flower usando Docker " -"` que cobre este passo em mais detalhes." - -#: ../../source/contributor-how-to-build-docker-images.rst:25 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " @@ -119,7 +256,7 @@ msgstr "" "respectivos Dockerfiles. Você pode encontrá-los nos subdiretórios " "``src/docker```." -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:23 #, fuzzy msgid "" "Flower Docker images are configured via build arguments. Through build " @@ -139,150 +276,175 @@ msgstr "" "Todos os argumentos de compilação disponíveis para cada imagem estão " "listados em uma das tabelas abaixo." -#: ../../source/contributor-how-to-build-docker-images.rst:35 -msgid "Building the base image" +#: ../../source/contributor-how-to-build-docker-images.rst:30 +#, fuzzy +msgid "Building the Base Image" msgstr "Construindo a imagem base" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:36 #: ../../source/contributor-how-to-build-docker-images.rst:98 msgid "Build argument" msgstr "Argumento de compilação" -#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:37 #: ../../source/contributor-how-to-build-docker-images.rst:99 msgid "Description" msgstr "Descrição" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:38 #: ../../source/contributor-how-to-build-docker-images.rst:100 msgid "Required" msgstr "Necessário" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:39 #: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/docker/persist-superlink-state.rst:18 +#: ../../source/docker/pin-version.rst:11 +#: ../../source/docker/set-environment-variables.rst:8 msgid "Example" msgstr "Exemplo" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:40 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:41 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "O nome do repositório da imagem base." 
-#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 +#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #: ../../source/contributor-how-to-build-docker-images.rst:104 msgid "No" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "``ubuntu``" msgstr "``UBUNTU_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:44 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "Version of the Linux distribution." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 -#, fuzzy -msgid "``22.04``" -msgstr "``23.0.1``" +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:48 msgid "``PYTHON_VERSION``" msgstr "``PYTHON_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:49 msgid "Version of ``python`` to be installed." msgstr "Versão do ``python`` a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:52 msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "Version of ``pip`` to be installed." msgstr "Versão do ``pip`` a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:62 #: ../../source/contributor-how-to-build-docker-images.rst:108 msgid "Yes" msgstr "Sim" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -msgid "``23.0.1``" -msgstr "``23.0.1``" +#: ../../source/contributor-how-to-build-docker-images.rst:55 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:56 msgid "``SETUPTOOLS_VERSION``" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:57 msgid "Version of ``setuptools`` to be installed." msgstr "Versão do ``setuptools`` a ser instalada." 
-#: ../../source/contributor-how-to-build-docker-images.rst:64 -msgid "``69.0.2``" -msgstr "``69.0.2``" +#: ../../source/contributor-how-to-build-docker-images.rst:59 +#, fuzzy +msgid ":substitution-code:`|setuptools_version|`" +msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:60 msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:61 msgid "Version of Flower to be installed." msgstr "Versão do Flower a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:68 -#, fuzzy -msgid "``1.8.0``" -msgstr "``1.7.0``" +#: ../../source/contributor-how-to-build-docker-images.rst:63 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:64 #, fuzzy msgid "``FLWR_PACKAGE``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:65 #, fuzzy msgid "The Flower package to be installed." msgstr "Versão do Flower a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "`Direct Reference Examples`_" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:73 #, fuzzy msgid "" "The following example creates a base Ubuntu/Alpine image with Python " -"3.11.0, pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" "O exemplo seguinte cria uma imagem base com Python 3.11.0, pip 23.0.1 e " "setuptools 69.0.2:" #: ../../source/contributor-how-to-build-docker-images.rst:88 +#, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" "O nome da imagem é ``flwr_base`` com a tag ``0.1.0``. 
Lembre-se que os " "argumentos de construção assim como o nome e a tag podem ser adaptados de" @@ -290,8 +452,8 @@ msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:92 #, fuzzy -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "Construindo a imagem do servidor" +msgid "Building a Flower Binary Image" +msgstr "Construindo a imagem base" #: ../../source/contributor-how-to-build-docker-images.rst:102 msgid "``BASE_REPOSITORY``" @@ -317,24 +479,21 @@ msgid "The Tag of the Flower base image." msgstr "O nome do repositório da imagem base." #: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:111 -#, fuzzy msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image " -"with the official Flower base image:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -"O exemplo a seguir cria uma imagem de servidor com a imagem base oficial " -"do Flower py3.11-ubuntu22.04 e Flower 1.7.0:" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:121 #, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " "base image, all you need to do is set the ``BASE_REPOSITORY`` build " -"argument." +"argument to ``flwr_base`` (as we've specified above)." msgstr "" "Se você quiser usar sua própria imagem base ao invés da imagem oficial " "base do Flower, tudo que você precisa fazer é definir os argumentos " @@ -343,10 +502,14 @@ msgstr "" "sua imagem e o valor de ``BASE_IMAGE_TAG`` deve corresponder à tag da sua" " imagem." -#: ../../source/contributor-how-to-build-docker-images.rst:133 +#: ../../source/contributor-how-to-build-docker-images.rst:132 msgid "After creating the image, we can test whether the image is working:" msgstr "Depois de criar a imagem, podemos testar se a imagem está funcionando:" +#: ../../source/contributor-how-to-build-docker-images.rst:139 +msgid "Direct Reference Examples" +msgstr "" + #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" msgstr "Contribua com traduções" @@ -695,8 +858,8 @@ msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" #: ../../source/contributor-how-to-install-development-versions.rst:42 @@ -815,145 +978,96 @@ msgstr "" msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:15 -#, fuzzy -msgid "Trigger the CI for building the Docker images." -msgstr "Versão da imagem Docker oficial do Ubuntu." - #: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a " -"``workflow_dispatch`` event in the GitHub CI. This can be done either " -"through the UI or via the GitHub CLI. The event requires only one input, " -"the Flower version, to be released." 
-msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:21 -msgid "**Via the UI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page " -"`_." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower " -"in the ``Version of Flower`` input field." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:19 msgid "Create a pull request which contains the following changes:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:21 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "Update all files which contain the current version number if necessary." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:23 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:25 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:28 msgid "Publishing a pre-release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Pre-release naming" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:36 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:39 msgid "Examples include:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "``1.0.0a0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:42 msgid "``1.0.0b0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:43 msgid "``1.0.0rc0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "``1.0.0rc1``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "`PEP-440 `_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "" "`PyPA Choosing a versioning scheme " "`_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: ../../source/contributor-how-to-release-flower.rst:52 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -961,26 +1075,26 @@ msgid "" "11 on precedence)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:73 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "Pre-release classification" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:75 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "Should the next pre-release be called alpha, beta, or release candidate?" 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:77 +#: ../../source/contributor-how-to-release-flower.rst:59 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "Beta: feature complete, allowed to have known issues" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:61 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" @@ -1003,7 +1117,7 @@ msgstr "" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 #: ../../source/how-to-install-flower.rst:8 msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." msgstr "" @@ -1817,10 +1931,10 @@ msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" #: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "" @@ -1918,12 +2032,15 @@ msgid "Get started as a contributor" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.8 `_ or above" +msgid "`Python 3.9 `_ or above" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 @@ -1950,7 +2067,7 @@ msgid "Developer Machine Setup" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" +msgid "Preliminaries" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -2006,14 +2123,14 @@ msgstr "" msgid "" "If you don't have :code:`pyenv` installed, the following script that will" " install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +":code:`Python 3.9.20` by default)::" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 msgid "" "If you already have :code:`pyenv` installed (along with the :code:`pyenv-" "virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +":code:`Python 3.9.20` by default)::" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 @@ -2144,22765 
+2261,27928 @@ msgstr "" msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" +#: ../../source/docker/enable-tls.rst:2 +msgid "Enable TLS for Secure Connections" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" +#: ../../source/docker/enable-tls.rst:7 +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:12 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:14 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." 
msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" +#: ../../source/docker/enable-tls.rst:27 +msgid "SuperLink" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:29 msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 -msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +#: ../../source/docker/enable-tls.rst +msgid "Understanding the command" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 -msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:213 +#: ../../source/docker/tutorial-quickstart-docker.rst:300 +msgid "``docker run``: This tells Docker to run a container from an image." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:214 +#: ../../source/docker/tutorial-quickstart-docker.rst:301 +msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" 
-msgstr "" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/docker/enable-tls.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 -msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +#: ../../source/docker/enable-tls.rst +msgid "directory." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. 
First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +#: ../../source/docker/enable-tls.rst +msgid "inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 -msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +#: ../../source/docker/enable-tls.rst +msgid "SuperLink." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." 
+"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 -msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +#: ../../source/docker/enable-tls.rst +msgid "the network." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" +#: ../../source/docker/enable-tls.rst:71 +msgid "SuperNode" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst:73 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst:78 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. 
:code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the " -"server" +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/docker/enable-tls.rst +msgid "directory inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 -msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst:107 msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" +#: ../../source/docker/enable-tls.rst +msgid "" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/enable-tls.rst msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." 
+"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/enable-tls.rst msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/index.rst:4 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/index.rst:7 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 -msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +#: ../../source/docker/index.rst:11 +msgid "Getting Started" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" +#: ../../source/docker/index.rst:20 +msgid "Running in Production" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." 
+#: ../../source/docker/index.rst:29 +msgid "Advanced Options" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +#: ../../source/docker/index.rst:41 +msgid "Run Flower using Docker Compose" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 -msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/persist-superlink-state.rst:4 msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/docker/persist-superlink-state.rst:10 msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/docker/persist-superlink-state.rst:20 msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." 
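The example referred to above lives in the .rst source rather than in these strings; a minimal sketch of it, with a placeholder image tag and ``--insecure`` added only to keep the command self-contained, is:

    # Create the state directory and hand it to the non-root container user
    mkdir state
    sudo chown -R 49999:49999 state

    # Mount it at /app/state and name the database file
    docker run --rm \
      --volume ./state/:/app/state \
      flwr/superlink:<version> \
      --insecure \
      --database state.db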
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/persist-superlink-state.rst:35 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +#: ../../source/docker/pin-version.rst:2 +msgid "Pin a Docker Image to a Specific Version" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/pin-version.rst:4 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/pin-version.rst:13 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 -msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +#: ../../source/docker/pin-version.rst:22 +msgid "This will output" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. 
" -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +#: ../../source/docker/pin-version.rst:29 +msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" +#: ../../source/docker/run-as-root-user.rst:4 +msgid "" +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" +#: ../../source/docker/run-as-root-user.rst:8 +msgid "" +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." msgstr "" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "" +#: ../../source/docker/run-as-root-user.rst:29 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "Construindo a imagem do servidor" -#: ../../source/explanation-differential-privacy.rst:3 -msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." +#: ../../source/docker/run-as-subprocess.rst:2 +msgid "Run ClientApp as a Subprocess" msgstr "" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. 
This can be " +"done by extending the SuperNode image:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" +#: ../../source/docker/run-as-subprocess.rst:16 +msgid "Dockerfile.supernode" msgstr "" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-as-subprocess.rst:30 msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 -msgid "Formal Definition" +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" msgstr "" -#: ../../source/explanation-differential-privacy.rst:26 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 -msgid "Differential Privacy in Machine Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:46 -msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -#: ../../source/explanation-differential-privacy.rst:53 -msgid "Differential Privacy in Federated Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Verifique que o serviço Docker está rodando." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:54 -msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +msgid "Run the Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. 
In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 -msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +msgid "Build and start the services using the following command:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 -msgid "Central Differential Privacy" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 +msgid "pyproject.toml" msgstr "" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#, fuzzy +msgid "Run the example:" +msgstr "Exemplo" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 -msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 -msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +msgid "Limitations" msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 -msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +msgid "Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -msgid "Local Differential Privacy" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +msgid "quickstart-fastai" msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 -msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +msgid "quickstart-huggingface" msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 -msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +msgid "quickstart-jax" msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 -msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +msgid "quickstart-mlcube" msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +msgid "quickstart-mlx" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -msgid "**References:**" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +msgid "quickstart-monai" msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +msgid "quickstart-pandas" msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 msgid "" -"[3] Geyer et al. 
Differentially Private Federated Learning: A Client " -"Level Perspective." +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +msgid "quickstart-pytorch" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 -msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +msgid "quickstart-tabnet" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +msgid "quickstart-tensorflow" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/set-environment-variables.rst:2 +msgid "Set Environment Variables" +msgstr "" + +#: ../../source/docker/set-environment-variables.rst:4 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +msgid "Quickstart with Docker" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" +#: ../../source/docker/tutorial-quickstart-docker.rst:7 +msgid "" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 -msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:45 msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:105 -msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +msgid "Step 2: Start the SuperLink" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:106 -msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +msgid "Open your terminal and run:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:107 -msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. 
The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:215 +#: ../../source/docker/tutorial-quickstart-docker.rst:304 msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:216 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"``--detach``: Run the container in the background, freeing up the " +"terminal." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +msgid "Step 3: Start the SuperNode" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +msgid "Start the second container:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:144 +msgid "" +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker.rst:148 +msgid "" +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst:150 +msgid "Dockerfile.clientapp" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the Dockerfile" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"from the current working directory into the container's ``/app`` " +"directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "from the ``pyproject.toml``." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" +#: ../../source/docker/tutorial-quickstart-docker.rst:184 +msgid "" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker.rst:189 msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:198 msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 -msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:201 +msgid "Start the first ClientApp container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 -msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 -msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." +#: ../../source/docker/tutorial-quickstart-docker.rst:222 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:233 +msgid "Step 5: Start the SuperExec" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:235 msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +msgid "" +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." 
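A sketch of the first ClientApp container built from the flags described in this step; the second one would point at the second SuperNode in the same way:

    docker run --rm \
      --network flwr-network \
      --detach \
      flwr_clientapp:0.0.1 \
      --supernode supernode-1:9094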
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:240 msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/docker/tutorial-quickstart-docker.rst:242 +msgid "Dockerfile.superexec" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" +#: ../../source/docker/tutorial-quickstart-docker.rst:277 +msgid "" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." +#: ../../source/docker/tutorial-quickstart-docker.rst:285 +msgid "Start the SuperExec container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +#: ../../source/docker/tutorial-quickstart-docker.rst:315 +msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 -msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +#: ../../source/docker/tutorial-quickstart-docker.rst:317 +msgid "Add the following lines to the ``pyproject.toml``:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." +#: ../../source/docker/tutorial-quickstart-docker.rst:326 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +#: ../../source/docker/tutorial-quickstart-docker.rst:332 +msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" +#: ../../source/docker/tutorial-quickstart-docker.rst:339 +msgid "Step 7: Update the Application" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker.rst:341 msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 -msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. 
The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +#: ../../source/docker/tutorial-quickstart-docker.rst:351 +msgid "Stop the current ClientApp containers:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" -msgstr "" +#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "Construindo a imagem base" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 -msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." +#: ../../source/docker/tutorial-quickstart-docker.rst:363 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." +#: ../../source/docker/tutorial-quickstart-docker.rst:378 +msgid "Run the updated project:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." +#: ../../source/docker/tutorial-quickstart-docker.rst:385 +msgid "Step 8: Clean Up" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +#: ../../source/docker/tutorial-quickstart-docker.rst:387 +msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:399 +msgid "Where to Go Next" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." +#: ../../source/docker/tutorial-quickstart-docker.rst:401 +msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#: ../../source/docker/tutorial-quickstart-docker.rst:402 +msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." +#: ../../source/docker/tutorial-quickstart-docker.rst:403 +msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +msgid "Quickstart with Docker Compose" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 +msgid "" +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 +msgid "" +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +msgid "Step 2: Run Flower in Insecure Mode" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 +msgid "" +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." 
msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +msgid "``docker compose``: The Docker command to run the Docker Compose tool." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +msgid "" +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 -msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:15 -msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 -msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +msgid "Step 4: Update the Application" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "In the next step, change the application code." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +msgid "Rebuild and restart the services." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 -msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:44 -msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. 
For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:47 -msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 -msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:66 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "Security notice" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +msgid "" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +msgid "Run the command:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." msgstr "" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"Docker merges Compose files according to `merging rules " +"`_." msgstr "" -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/how-to-configure-clients.rst:9 -msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -#: ../../source/how-to-configure-clients.rst:24 -msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." -msgstr "" - -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." msgstr "" -#: ../../source/how-to-configure-clients.rst:32 -msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 +msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." msgstr "" -#: ../../source/how-to-configure-clients.rst:47 -msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. 
If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +msgid "compose.yml" msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +msgid "with-tls.yml" msgstr "" -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 -msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. 
when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +msgid "with-state.yml" msgstr "" -#: ../../source/how-to-configure-logging.rst:53 -msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 +msgid "Restart the services:" msgstr "" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." msgstr "" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +msgid "Step 10: Clean Up" msgstr "" -#: ../../source/how-to-configure-logging.rst:130 -msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +msgid "Remove all services and volumes:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:4 -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." 
+#: ../../source/docker/use-a-different-version.rst:2 +msgid "Use a Different Flower Version" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/docker/use-a-different-version.rst:9 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +msgid "Centralized Training" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. 
For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." +"The model architecture defined in class Net() is extended with Batch " +"Normalization layers accordingly." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:39 -msgid "Server (SuperLink)" -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:41 -msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +msgid "You can now run your machine learning workload:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system with FedBN; the system consists of one server" +" and two clients." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:54 -msgid "Client (SuperNode)" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +msgid "Federated Training" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow; only the :code:`get_parameters` and :code:`set_parameters` " +"functions in :code:`client.py` need to be revised. If not, please read " +":doc:`Example: PyTorch - From Centralized To Federated ` first." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"Our example consists of one *server* and two *clients*. In FedBN, " +":code:`server.py` remains unchanged, so we can start the server directly." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"Finally, we will revise our *client* logic by changing " +":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`: " +"we will exclude batch normalization parameters from the model parameter " +"list when sending to or receiving from the server."
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:75 -msgid "Additional resources" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +msgid "Now, you can now open two additional terminal windows and run" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 +#: ../../source/tutorial-quickstart-jax.rst:283 +msgid "Next Steps" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +msgid "" +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" msgstr "" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." msgstr "" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. 
This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"Let's create a new file called :code:`cifar.py` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as :code:`torch` and :code:`torchvision`) need " +"to be imported. You can see that we do not import any package for " +"federated learning. You can keep all these imports as they are even when " +"we add the federated learning components at a later point." msgstr "" -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in :code:`class Net()`." msgstr "" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"The :code:`load_data()` function loads the CIFAR-10 training and test " +"sets. The :code:`transform` normalized the data after loading." msgstr "" -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +msgid "" +"We now need to define the training (function :code:`train()`) which loops" +" over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." msgstr "" -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +msgid "" +"The evaluation of the model is defined in the function :code:`test()`. " +"The function loops over all test samples and measures the loss of the " +"model based on the test dataset." msgstr "" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 msgid "" -"Built-in strategies return user-provided initial parameters. 
The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." msgstr "" -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." msgstr "" -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." +"The concept is easy to understand. We have to start a *server* and then " +"use the code in :code:`cifar.py` for the *clients* that are connected to " +"the *server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 +#: ../../source/tutorial-quickstart-jax.rst:129 +msgid "" +"Our example consists of one *server* and two *clients*. Let's set up " +":code:`server.py` first. The *server* needs to import the Flower package " +":code:`flwr`. Next, we use the :code:`start_server` function to start a " +"server and tell it to perform three rounds of federated learning." msgstr "" -#: ../../source/how-to-implement-strategies.rst:218 -msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. 
The signature of :code:`configure_fit` makes this clear:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 +#: ../../source/tutorial-quickstart-jax.rst:139 +msgid "We can already start the *server*:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined centralized training in :code:`cifar.py`. " +"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " +"update the parameters on our PyTorch model:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. " +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:234 -msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +msgid ":code:`set_parameters`" msgstr "" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +#: ../../source/tutorial-quickstart-jax.rst:166 msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"set the model parameters on the local model that are received from the " +"server" msgstr "" -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 +#: ../../source/tutorial-quickstart-jax.rst:168 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." 
+"loop over the list of model parameters received as NumPy " +":code:`ndarray`'s (think list of neural network layers)" msgstr "" -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#: ../../source/tutorial-quickstart-jax.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid ":code:`get_parameters`" msgstr "" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 +#: ../../source/tutorial-quickstart-jax.rst:170 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"get the model parameters and return them as a list of NumPy " +":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" msgstr "" -#: ../../source/how-to-implement-strategies.rst:258 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid ":code:`fit`" msgstr "" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:172 +#: ../../source/tutorial-quickstart-jax.rst:176 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"update the parameters of the local model with the parameters received " +"from the server" msgstr "" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +msgid "train the model on the local training set" msgstr "" -#: ../../source/how-to-implement-strategies.rst:265 -msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +msgid "get the updated local model weights and return them to the server" msgstr "" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. 
Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid ":code:`evaluate`" msgstr "" -#: ../../source/how-to-implement-strategies.rst:281 -msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 +#: ../../source/tutorial-quickstart-jax.rst:177 +msgid "evaluate the updated model on the local test set" msgstr "" -#: ../../source/how-to-implement-strategies.rst:283 -msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +msgid "return the local loss and accuracy to the server" msgstr "" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`test()` previously " +"defined in :code:`cifar.py`. So what we really do here is we tell Flower " +"through our :code:`NumPyClient` subclass which of our already defined " +"functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." msgstr "" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +msgid "" +"All that's left to do it to define a function that loads both model and " +"data, creates a :code:`CifarClient`, and starts this client. You load " +"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " +"with the function :code:`fl.client.start_client()` by pointing it at the " +"same IP address we used in :code:`server.py`:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:293 -msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 +#: ../../source/tutorial-quickstart-jax.rst:274 +msgid "And that's it. You can now open two additional terminal windows and run" msgstr "" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). 
:code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" msgstr "" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" msgstr "" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/explanation-differential-privacy.rst:3 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/explanation-differential-privacy.rst:6 msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" +#: ../../source/explanation-differential-privacy.rst:12 +msgid "" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." 
msgstr "" -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" +#: ../../source/explanation-differential-privacy.rst:22 msgid "" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" +#: ../../source/explanation-differential-privacy.rst:25 +msgid "Formal Definition" msgstr "" -#: ../../source/how-to-install-flower.rst:17 msgid "" -"Stable releases are available on `PyPI " -"`_::" +#: ../../source/explanation-differential-privacy.rst:26 msgid "" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(M):" msgstr "" -#: ../../source/how-to-install-flower.rst:21 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +#: ../../source/explanation-differential-privacy.rst:32 msgid "" +"\\small\n" +"P[M(D_{1}) \\in S] \\leq e^{\\epsilon} P[M(D_{2}) \\in S] + \\delta" msgstr "" -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" +#: ../../source/explanation-differential-privacy.rst:38 msgid "" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." +#: ../../source/explanation-differential-privacy.rst:45 +msgid "Differential Privacy in Machine Learning" msgstr "" -#: ../../source/how-to-install-flower.rst:31 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +#: ../../source/explanation-differential-privacy.rst:46 msgid "" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. 
Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" +#: ../../source/explanation-differential-privacy.rst:53 +msgid "Differential Privacy in Federated Learning" msgstr "" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/explanation-differential-privacy.rst:54 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" +#: ../../source/explanation-differential-privacy.rst:58 +msgid "" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -#: ../../source/how-to-install-flower.rst:58 -msgid "Install via Docker" +#: ../../source/explanation-differential-privacy.rst:60 +msgid "" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/explanation-differential-privacy.rst:63 +msgid "" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" +#: ../../source/explanation-differential-privacy.rst:65 +msgid "" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." 
msgstr "" -#: ../../source/how-to-install-flower.rst:65 -msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 +msgid "Central Differential Privacy" msgstr "" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/explanation-differential-privacy.rst:69 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" +#: ../../source/explanation-differential-privacy.rst:76 +msgid "" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -#: ../../source/how-to-install-flower.rst:76 -msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-differential-privacy.rst:89 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" +#: ../../source/explanation-differential-privacy.rst:94 +msgid "Clipping" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-differential-privacy.rst:96 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:6 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." +#: ../../source/explanation-differential-privacy.rst:98 msgid "" +"**Fixed Clipping** : A predefined fixed threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" +#: ../../source/explanation-differential-privacy.rst:100 msgid "" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:16 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +#: ../../source/explanation-differential-privacy.rst:102 msgid "" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:18 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 msgid "Local Differential Privacy" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" +#: ../../source/explanation-differential-privacy.rst:107 msgid "" +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" +#: ../../source/explanation-differential-privacy.rst:116 msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:34 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +#: ../../source/explanation-differential-privacy.rst:118 msgid "" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:44 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +#: ../../source/explanation-differential-privacy.rst:120 msgid "" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:59 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. 
Open those using one of the following " -"commands as before:" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:84 -msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." +#: ../../source/explanation-differential-privacy.rst:131 +msgid "**References:**" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" +#: ../../source/explanation-differential-privacy.rst:133 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:135 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." +#: ../../source/explanation-differential-privacy.rst:139 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:110 -msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." +#: ../../source/explanation-federated-evaluation.rst:4 +msgid "" +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:121 -msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." 
+#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" +#: ../../source/explanation-federated-evaluation.rst:58 +msgid "Custom Strategies" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/explanation-federated-evaluation.rst:60 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." +"The :code:`Strategy` abstraction provides a method called " +":code:`evaluate` that can directly be used to evaluate the current global" +" model parameters. The current server implementation calls " +":code:`evaluate` after parameter aggregation and before federated " +"evaluation (see next paragraph)." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:136 -msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +#: ../../source/explanation-federated-evaluation.rst:65 +msgid "Federated Evaluation" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" +#: ../../source/explanation-federated-evaluation.rst:68 +msgid "Implementing Federated Evaluation" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"Client-side evaluation happens in the :code:`Client.evaluate` method and " +"can be configured from the server side." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." +#: ../../source/explanation-federated-evaluation.rst:101 +msgid "Configuring Federated Evaluation" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/explanation-federated-evaluation.rst:103 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-federated-evaluation.rst:105 msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. 
Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." +":code:`fraction_evaluate`: a :code:`float` defining the fraction of " +"clients that will be selected for evaluation. If " +":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " +"are connected to the server, then :code:`10` will be randomly selected " +"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " +"federated evaluation will be disabled." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-federated-evaluation.rst:106 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." +":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " +"clients to be selected for evaluation. If :code:`fraction_evaluate` is " +"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " +":code:`100` clients are connected to the server, then :code:`20` clients " +"will be selected for evaluation." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-federated-evaluation.rst:107 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +":code:`min_available_clients`: an :code:`int` that defines the minimum " +"number of clients which need to be connected to the server before a round" +" of federated evaluation can start. If fewer than " +":code:`min_available_clients` are connected to the server, the server " +"will wait until more clients are connected before it continues to sample " +"clients for evaluation." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/explanation-federated-evaluation.rst:108 msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +":code:`on_evaluate_config_fn`: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:226 -msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." +#: ../../source/explanation-federated-evaluation.rst:135 +msgid "Evaluating Local Model Updates During Training" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-federated-evaluation.rst:137 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." +"Model parameters can also be evaluated during training. 
" +":code:`Client.fit` can return arbitrary evaluation results as a " +"dictionary:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" +#: ../../source/explanation-federated-evaluation.rst:177 +msgid "Full Code Example" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-federated-evaluation.rst:179 msgid "" -"Ray Dashboard: ``_" -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__. Supported architectures include " -"``amd64`` and ``arm64v8``." +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:8 -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "" +#: ../../source/explanation-flower-architecture.rst:3 +msgid "Flower Architecture" +msgstr "Arquitetura do Flower" -#: ../../source/how-to-run-flower-using-docker.rst:15 +#: ../../source/explanation-flower-architecture.rst:5 msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:21 +#: ../../source/explanation-flower-architecture.rst:8 msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:27 +#: ../../source/explanation-flower-architecture.rst:12 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:32 -msgid "Flower SuperLink" +#: ../../source/explanation-flower-architecture.rst:16 +msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:35 -msgid "Quickstart" +#: ../../source/explanation-flower-architecture.rst:24 +msgid "Hub-and-spoke topology in federated learning" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:37 -msgid "If you're looking to try out Flower, you can use the following command:" +#: ../../source/explanation-flower-architecture.rst:24 +msgid "" +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:43 +#: ../../source/explanation-flower-architecture.rst:26 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:49 +#: ../../source/explanation-flower-architecture.rst:31 msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:53 -msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." +#: ../../source/explanation-flower-architecture.rst:36 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:60 -#: ../../source/how-to-run-flower-using-docker.rst:259 -#: ../../source/how-to-run-flower-using-docker.rst:376 +#: ../../source/explanation-flower-architecture.rst:38 msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:65 +#: ../../source/explanation-flower-architecture.rst:41 msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:72 -msgid "Mounting a volume to store the state on the host system" +#: ../../source/explanation-flower-architecture.rst:47 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:74 +#: ../../source/explanation-flower-architecture.rst:49 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a directory where you want to save the file" -" on your host system and a name for the database file. By default, the " -"SuperLink container runs with a non-root user called ``app`` with the " -"user ID ``49999``. It is recommended to create new directory and change " -"the user ID of the directory to ``49999`` to ensure the mounted directory" -" has the proper permissions. If you later want to delete the directory, " -"you can change the user ID back to the current user ID by running ``sudo " -"chown -R $USER:$(id -gn) state``." +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:82 +#: ../../source/explanation-flower-architecture.rst:53 msgid "" -"In the example below, we create a new directory, change the user ID and " -"tell Docker via the flag ``--volume`` to mount the local ``state`` " -"directory into the ``/app/state`` directory of the container. " -"Furthermore, we use the flag ``--database`` to specify the name of the " -"database file." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:95 +#: ../../source/explanation-flower-architecture.rst:59 msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 -msgid "Enabling SSL for secure connections" -msgstr "" +#: ../../source/explanation-flower-architecture.rst:71 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Arquitetura do Flower" -#: ../../source/how-to-run-flower-using-docker.rst:102 -msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." +#: ../../source/explanation-flower-architecture.rst:71 +msgid "The basic Flower architecture for federated learning." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:106 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:110 +#: ../../source/explanation-flower-architecture.rst:79 msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container. This allows the " -"SuperLink to access the files within the container. The ``ro`` stands for" -" ``read-only``. Docker volumes default to ``read-write``; that option " -"tells Docker to make the volume ``read-only`` instead. Finally, we pass " -"the names of the certificates and key file to the SuperLink with the " -"``--ssl-ca-certfile``, ``--ssl-certfile`` and ``--ssl-keyfile`` flag." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:128 +#: ../../source/explanation-flower-architecture.rst:82 msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, " -"the mounted files and directories must have the proper permissions for " -"the user ID ``49999``. For example, to change the user ID of all files in" -" the ``certificates/`` directory, you can run ``sudo chown -R 49999:49999" -" certificates/*``." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:134 -msgid "Flower SuperNode" +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:136 +#: ../../source/explanation-flower-architecture.rst:87 msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:141 -msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). 
A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +#: ../../source/explanation-flower-architecture.rst:97 +msgid "Multi-tenancy federated learning architecture" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:147 -msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." +#: ../../source/explanation-flower-architecture.rst:97 +msgid "Multi-tenancy federated learning architecture with Flower" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:155 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:99 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -"Antes de começarmos, precisamos encontrar alguns pré-requisitos em nosso " -"ambiente de desenvolvimento local." - -#: ../../source/how-to-run-flower-using-docker.rst:159 -#, fuzzy -msgid "Clone the Flower repository." -msgstr "Clone o repositório do flower." -#: ../../source/how-to-run-flower-using-docker.rst:173 -msgid "Creating a SuperNode Dockerfile" +#: ../../source/explanation-flower-architecture.rst:104 +msgid "" +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -msgid "Let's assume the following project layout:" +#: ../../source/explanation-flower-architecture.rst:113 +msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:184 +#: ../../source/explanation-flower-architecture.rst:113 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/explanation-flower-architecture.rst:116 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:200 -msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." 
+#: ../../source/explanation-flower-architecture.rst:125 +msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:203 +#: ../../source/explanation-flower-architecture.rst:125 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:217 +#: ../../source/explanation-flower-architecture.rst:129 msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:226 -#, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:228 +#: ../../source/explanation-flower-architecture.rst:132 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:235 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:240 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:242 -msgid "Now that we have built the SuperNode image, we can finally run it." 
+#: ../../source/explanation-flower-architecture.rst:151 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "Let's break down each part of this command:" +#: ../../source/explanation-flower-architecture.rst:151 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 -msgid "``docker run``: This is the command to run a new Docker container." +#: ../../source/explanation-flower-architecture.rst:156 +msgid "" +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 +#: ../../source/explanation-flower-architecture.rst:161 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:254 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -msgid "``--insecure``: This option enables insecure communication." +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of " -"the SuperLinks Fleet" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:269 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:273 -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:283 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." 
+#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:285 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--root-certificates`` flag when starting " -"the container." +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:297 -msgid "Flower ServerApp" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:299 -msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:301 -msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:304 -msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:309 -msgid "Creating a ServerApp Dockerfile" +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:320 -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:324 -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:335 -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. 
In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:343 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:345 -msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:352 -msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:357 -#, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "Construindo a imagem do servidor" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:359 -msgid "Now that we have built the ServerApp image, we can finally run it." +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:371 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of " -"the SuperLinks Driver" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:385 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:389 -msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. 
To see all available flags that the ServerApp supports, run:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:399 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:401 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--root-certificates`` flags when starting" -" the container." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:412 -msgid "Advanced Docker options" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:417 -msgid "" -"Flower Docker images, by default, run with a non-root user " -"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is not" -" recommended unless it is necessary for specific tasks during the build " -"process. Always make sure to run the container as a non-root user in " -"production to maintain security best practices." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:424 -msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:434 -msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the " -"``USER root`` directive within your Dockerfile." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:454 -msgid "Using a different Flower version" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:456 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:460 -msgid "Pinning a Docker image to a specific version" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:462 -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:467 -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:474 -msgid "Next, we can pin the hash when running a new SuperLink container:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:483 -msgid "Setting environment variables" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:485 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." +"into one file, which is created incrementally in collaboration with the " +"community." msgstr "" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +msgid "" +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." msgstr "" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." msgstr "" -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" msgstr "" -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"Each enhancement doc is provided as a Markdown file having the following " +"structure" msgstr "" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" msgstr "" -#: ../../source/how-to-run-simulations.rst:45 -msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" msgstr "" -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" msgstr "" -#: ../../source/how-to-run-simulations.rst:63 -msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" msgstr "" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" msgstr "" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" msgstr "" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" msgstr "" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." msgstr "" -#: ../../source/how-to-run-simulations.rst:89 -msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" msgstr "" -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." msgstr "" -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." msgstr "" -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." msgstr "" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." msgstr "" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." msgstr "" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +msgid "" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." msgstr "" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." msgstr "" -#: ../../source/how-to-run-simulations.rst:110 -msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." msgstr "" -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." msgstr "" -#: ../../source/how-to-run-simulations.rst:115 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" msgstr "" -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." msgstr "" -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." msgstr "" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." msgstr "" -#: ../../source/how-to-run-simulations.rst:124 -msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." msgstr "" -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." msgstr "" -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" msgstr "" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" -#: ../../source/how-to-run-simulations.rst:146 -msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" msgstr "" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:149 -msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" msgstr "" -#: ../../source/how-to-run-simulations.rst:150 -msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" msgstr "" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." msgstr "" -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" msgstr "" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"The same :code:`Strategy`-customization approach can be used to aggregate" +" custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-aggregate-evaluation-results.rst:36 msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" msgstr "" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" +msgstr "" + +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. 
" +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" +#: ../../source/how-to-authenticate-supernodes.rst:7 +msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-authenticate-supernodes.rst:8 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" msgstr "" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" +#: ../../source/how-to-authenticate-supernodes.rst:10 +msgid "SuperLink verifies the token" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:12 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" -msgstr "" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:15 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). 
It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" -msgstr "" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" +"This guide covers a preview feature that might change in future versions " +"of Flower." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-authenticate-supernodes.rst:18 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 -msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" +#: ../../source/how-to-authenticate-supernodes.rst:21 +msgid "Enable node authentication in :code:`SuperLink`" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-authenticate-supernodes.rst:23 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower " +":code:`SuperLink`. Use the following terminal command to start a Flower " +":code:`SuperNode` that has both secure connections and node " +"authentication enabled:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" +#: ../../source/how-to-authenticate-supernodes.rst:38 +msgid "Let's break down the authentication flags:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:40 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " +"file storing all known node public keys. You need to store all known node" +" public keys that are allowed to participate in a federation in one CSV " +"file (:code:`.csv`)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" +#: ../../source/how-to-authenticate-supernodes.rst:42 +msgid "" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." 
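As a rough illustration of the CSV layout described above, the sketch below assembles such a file from public keys generated beforehand (for instance with ``ssh-keygen -t ecdsa -b 384``). The file names ``node_1.pub``, ``node_2.pub`` and ``client_public_keys.csv`` are placeholders chosen for this example, not names mandated by Flower; this is a sketch of one possible way to build the file, not the authoritative workflow.

    from pathlib import Path

    # Hypothetical public-key files previously created with, e.g.,
    # `ssh-keygen -t ecdsa -b 384 -f node_1` (which writes node_1 and node_1.pub).
    pub_key_files = ["node_1.pub", "node_2.pub"]

    # The guide expects a single CSV file listing all known node public keys
    # in OpenSSH format, separated by commas and without any comments.
    keys = [Path(p).read_text().strip() for p in pub_key_files]
    Path("client_public_keys.csv").write_text(",".join(keys) + "\n")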
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:44 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" +"The second and third flags :code:`--auth-superlink-private-key` and :code" +":`--auth-superlink-public-key` expect paths to the server's private and " +"public keys. For development purposes, you can generate a private and " +"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 -msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +#: ../../source/how-to-authenticate-supernodes.rst:53 +msgid "Enable node authentication in :code:`SuperNode`" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:55 msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" +"Similar to the long-running Flower server (:code:`SuperLink`), you can " +"easily enable node authentication in the long-running Flower client " +"(:code:`SuperNode`). Use the following terminal command to start an " +"authenticated :code:`SuperNode`:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-authenticate-supernodes.rst:66 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." +"The :code:`--auth-supernode-private-key` flag expects a path to the " +"node's private key file and the :code:`--auth-supernode-public-key` flag " +"expects a path to the node's public key file. For development purposes, " +"you can generate a private and public key pair using :code:`ssh-keygen -t" +" ecdsa -b 384`." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +#: ../../source/how-to-authenticate-supernodes.rst:70 +msgid "Security notice" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-authenticate-supernodes.rst:72 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" +#: ../../source/how-to-authenticate-supernodes.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:68 +#: ../../source/how-to-use-built-in-mods.rst:85 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." +#: ../../source/how-to-authenticate-supernodes.rst:79 +msgid "" +"You should now have learned how to start a long-running Flower server " +"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " +"authentication enabled. You should also know the significance of the " +"private key and store it safely to minimize security risks." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 -msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +#: ../../source/how-to-configure-clients.rst:7 +msgid "Configuration values" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-configure-clients.rst:9 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" +#: ../../source/how-to-configure-clients.rst:20 +msgid "" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-configure-clients.rst:24 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-configure-clients.rst:26 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" +#: ../../source/how-to-configure-clients.rst:30 +msgid "Configuration through built-in strategies" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-configure-clients.rst:32 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" +"called configuration functions. A configuration function is a function " +"that the built-in strategy calls to get the configuration dictionary for " +"the current round. It then forwards the configuration dictionary to all " +"the clients selected during that round." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-configure-clients.rst:34 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-configure-clients.rst:47 msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +":code:`on_fit_config_fn`:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-configure-clients.rst:56 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:67 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-configure-clients.rst:69 msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. 
If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" +#: ../../source/how-to-configure-clients.rst:82 +msgid "The :code:`FedAvg` strategy will call this function *every round*." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/how-to-configure-clients.rst:85 +msgid "Configuring individual clients" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/how-to-configure-clients.rst:87 +msgid "" +"In some cases, it is necessary to send different configuration values to " +"different clients." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-configure-clients.rst:89 msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" +#: ../../source/how-to-configure-logging.rst:4 +msgid "" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +#: ../../source/how-to-configure-logging.rst:13 +msgid "" +"containing relevant information including: log message level (e.g. " +":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " +"took place from, as well as the log message itself. In this way, the " +"logger would typically display information on your terminal as follows:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" +#: ../../source/how-to-configure-logging.rst:34 +msgid "Saving log to file" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-configure-logging.rst:36 msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." 
+"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do :code:`fl.server.start_server`) and when " +"using the :code:`VirtualClientEngine` (i.e. when you do " +":code:`fl.simulation.start_simulation`). In some situations you might " +"want to save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +#: ../../source/how-to-configure-logging.rst:53 +msgid "" +"With the above, Flower will record the log you see on your terminal to " +":code:`log.txt`. This file will be created in the same directory as were " +"you are running the code from. If we inspect we see the log above is also" +" recorded but prefixing with :code:`identifier` each line:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-configure-logging.rst:74 +msgid "Log your own messages" +msgstr "" + +#: ../../source/how-to-configure-logging.rst:76 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-configure-logging.rst:102 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" +#: ../../source/how-to-configure-logging.rst:128 +msgid "Log to a remote service" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-configure-logging.rst:130 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +"The :code:`fl.common.logger.configure` function, also allows specifying a" +" host to which logs can be pushed (via :code:`POST`) through a native " +"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" +" feature in :code:`gRPC`-based Federated Learning workloads where " +"otherwise gathering logs from all entities (i.e. the server and the " +"clients) might be cumbersome. Note that in Flower simulation, the server " +"automatically displays all logs. You can still specify a " +":code:`HTTPHandler` should you wish to backup or analyze the logs " +"somewhere else." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" +msgstr "" + +#: ../../source/how-to-enable-ssl-connections.rst:4 msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" +"This guide describes how to a SSL-enabled secure Flower server " +"(:code:`SuperLink`) can be started and how a Flower client " +"(:code:`SuperNode`) can establish a secure connections to it." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-enable-ssl-connections.rst:7 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:10 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The code example comes with a :code:`README.md` file which explains how " +"to start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-enable-ssl-connections.rst:18 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in :code:`examples/advanced-" +"tensorflow/certificates/generate.sh` with the following command sequence:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-enable-ssl-connections.rst:29 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +"This will generate the certificates in :code:`examples/advanced-" +"tensorflow/.cache/certificates`." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-enable-ssl-connections.rst:31 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" +#: ../../source/how-to-enable-ssl-connections.rst:39 +msgid "Server (SuperLink)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-enable-ssl-connections.rst:41 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" +#: ../../source/how-to-enable-ssl-connections.rst:50 +msgid "" +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 -msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +#: ../../source/how-to-enable-ssl-connections.rst:54 +msgid "Client (SuperNode)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-enable-ssl-connections.rst:56 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" +#: ../../source/how-to-enable-ssl-connections.rst:64 +msgid "" +"When setting :code:`root_certificates`, the client expects a file path to" +" PEM-encoded root certificates." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-enable-ssl-connections.rst:70 msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" +#: ../../source/how-to-enable-ssl-connections.rst:75 +msgid "Additional resources" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-enable-ssl-connections.rst:77 msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" +#: ../../source/how-to-enable-ssl-connections.rst:79 +msgid "`Let's Encrypt `_" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -msgid "Using Poetry" +#: ../../source/how-to-enable-ssl-connections.rst:80 +msgid "`certbot `_" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 -msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 -msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +#: ../../source/how-to-implement-strategies.rst:11 +msgid "The :code:`Strategy` abstraction" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -msgid "|clientapp_link|_" +#: ../../source/how-to-implement-strategies.rst:13 +msgid "" +"All strategy implementation are derived from the abstract base class " +":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. 
Here's an example:" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 -msgid "|serverapp_link|_" +#: ../../source/how-to-implement-strategies.rst:75 +msgid "" +"Creating a new strategy means implementing a new :code:`class` (derived " +"from the abstract base class :code:`Strategy`) that implements for the " +"previously shown abstract methods:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 -msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +#: ../../source/how-to-implement-strategies.rst:100 +msgid "The Flower server calls these methods in the following order:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" +#: ../../source/how-to-implement-strategies.rst:177 +msgid "The following sections describe each of those methods in more detail." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 -msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +#: ../../source/how-to-implement-strategies.rst:180 +msgid "The :code:`initialize_parameters` method" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-implement-strategies.rst:182 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +":code:`initialize_parameters` is called only once, at the very beginning " +"of an execution. It is responsible for providing the initial global model" +" parameters in a serialized form (i.e., as a :code:`Parameters` object)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-implement-strategies.rst:184 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +":code:`FedAvg`:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -msgid "Simulation in CLI" +#: ../../source/how-to-implement-strategies.rst:209 +msgid "" +"The Flower server will call :code:`initialize_parameters`, which either " +"returns the parameters that were passed to :code:`initial_parameters`, or" +" :code:`None`. If no parameters are returned from " +":code:`initialize_parameters` (i.e., :code:`None`), the server will " +"randomly select one client and ask it to provide its parameters. This is " +"a convenience feature and not recommended in practice, but it can be " +"useful for prototyping. In practice, it is recommended to always use " +"server-side parameter initialization." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-implement-strategies.rst:213 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." 
+" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 -msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +#: ../../source/how-to-implement-strategies.rst:216 +msgid "The :code:`configure_fit` method" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +":code:`configure_fit` is responsible for configuring the upcoming round " +"of training. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of :code:`configure_fit` makes this clear:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" +#: ../../source/how-to-implement-strategies.rst:231 +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in :code:`configure_fit`:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-implement-strategies.rst:233 +#: ../../source/how-to-implement-strategies.rst:280 msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"Use the :code:`client_manager` to randomly sample all (or a subset of) " +"available clients (each represented as a :code:`ClientProxy` object)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-implement-strategies.rst:234 msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " +"current global model :code:`parameters` and :code:`config` dict" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -msgid "Important" +#: ../../source/how-to-implement-strategies.rst:236 +msgid "" +"More sophisticated implementations can use :code:`configure_fit` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_fit`." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-implement-strategies.rst:240 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"The structure of this return value provides a lot of flexibility to the " +"user. 
Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" +#: ../../source/how-to-implement-strategies.rst:243 +msgid "The :code:`aggregate_fit` method" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" +#: ../../source/how-to-implement-strategies.rst:245 +msgid "" +":code:`aggregate_fit` is responsible for aggregating the results returned" +" by the clients that were selected and asked to train in " +":code:`configure_fit`." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-implement-strategies.rst:258 msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " +"of :code:`results`, but also a list of :code:`failures`." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-implement-strategies.rst:260 msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" +" dictionary of aggregated metrics. The :code:`Parameters` return value is" +" optional because :code:`aggregate_fit` might decide that the results " +"provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" +#: ../../source/how-to-implement-strategies.rst:263 +msgid "The :code:`configure_evaluate` method" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-implement-strategies.rst:265 msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" +":code:`configure_evaluate` is responsible for configuring the upcoming " +"round of evaluation. What does *configure* mean in this context? " +"Configuring a round means selecting clients and deciding what " +"instructions to send to these clients. The signature of " +":code:`configure_evaluate` makes this clear:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" +#: ../../source/how-to-implement-strategies.rst:278 +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. 
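Building on the per-client flexibility of :code:`configure_fit` described above, here is a sketch of the kind of customization mentioned earlier: a :code:`FedAvg` subclass (the class name is illustrative) that keeps the default sampling and pairing logic but attaches an extra ``"hello": "world"`` entry to the config of a single client::

    from typing import List, Tuple

    import flwr as fl
    from flwr.common import FitIns, Parameters
    from flwr.server.client_manager import ClientManager
    from flwr.server.client_proxy import ClientProxy


    class CustomFedAvg(fl.server.strategy.FedAvg):
        """FedAvg variant that sends an extra config value to the first sampled client."""

        def configure_fit(
            self, server_round: int, parameters: Parameters, client_manager: ClientManager
        ) -> List[Tuple[ClientProxy, FitIns]]:
            # Let FedAvg do the sampling and build the default (ClientProxy, FitIns) pairs
            client_instructions = super().configure_fit(server_round, parameters, client_manager)

            # Add a "special" key/value pair to the config of a single client only
            if client_instructions:
                _, fit_ins = client_instructions[0]
                fit_ins.config["hello"] = "world"
            return client_instructions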
Strategy implementations " +"usually perform the following steps in :code:`configure_evaluate`:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" +#: ../../source/how-to-implement-strategies.rst:281 +msgid "" +"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " +"the current global model :code:`parameters` and :code:`config` dict" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" +#: ../../source/how-to-implement-strategies.rst:283 +msgid "" +"More sophisticated implementations can use :code:`configure_evaluate` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_evaluate`." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. Import the required mods" +#: ../../source/how-to-implement-strategies.rst:287 +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" +#: ../../source/how-to-implement-strategies.rst:291 +msgid "The :code:`aggregate_evaluate` method" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" +#: ../../source/how-to-implement-strategies.rst:293 +msgid "" +":code:`aggregate_evaluate` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +":code:`configure_evaluate`." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-implement-strategies.rst:306 msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " +"receives a list of :code:`results`, but also a list of :code:`failures`." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. Create the ``ClientApp`` with mods" +#: ../../source/how-to-implement-strategies.rst:308 +msgid "" +":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" +" dictionary of aggregated metrics. The :code:`float` return value is " +"optional because :code:`aggregate_evaluate` might decide that the results" +" provided are not sufficient for aggregation (e.g., too many failures)." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:59 -msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. 
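To illustrate the aggregation step just described, the sketch below (an illustrative subclass, not a built-in strategy) overrides :code:`aggregate_evaluate` to return a loss averaged by the number of evaluation examples, and returns :code:`None` when there are no usable results::

    from typing import Dict, List, Optional, Tuple, Union

    import flwr as fl
    from flwr.common import EvaluateRes, Scalar
    from flwr.server.client_proxy import ClientProxy


    class WeightedLossFedAvg(fl.server.strategy.FedAvg):
        """FedAvg variant that aggregates evaluation losses weighted by example counts."""

        def aggregate_evaluate(
            self,
            server_round: int,
            results: List[Tuple[ClientProxy, EvaluateRes]],
            failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]],
        ) -> Tuple[Optional[float], Dict[str, Scalar]]:
            if not results:
                # Nothing to aggregate (e.g., too many failures)
                return None, {}
            total_examples = sum(res.num_examples for _, res in results)
            if total_examples == 0:
                return None, {}
            weighted_loss = sum(res.num_examples * res.loss for _, res in results) / total_examples
            return weighted_loss, {"num_clients_evaluated": len(results)}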
The order in which you provide the mods matters:" +#: ../../source/how-to-implement-strategies.rst:311 +msgid "The :code:`evaluate` method" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:72 -msgid "Order of execution" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-implement-strategies.rst:313 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" +":code:`evaluate` is responsible for evaluating model parameters on the " +"server-side. Having :code:`evaluate` in addition to " +":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " +"to perform both servers-side and client-side (federated) evaluation." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" +#: ../../source/how-to-implement-strategies.rst:323 +msgid "" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +":code:`evaluate` method might not complete successfully (e.g., it might " +"fail to load the server-side evaluation data)." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:78 -msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" +#: ../../source/how-to-install-flower.rst:6 +msgid "Python version" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" +#: ../../source/how-to-install-flower.rst:12 +msgid "Install stable release" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" +#: ../../source/how-to-install-flower.rst:15 +#: ../../source/how-to-upgrade-to-flower-next.rst:46 +msgid "Using pip" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-install-flower.rst:17 msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." +"Stable releases are available on `PyPI " +"`_::" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-install-flower.rst:21 msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra::" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +#: ../../source/how-to-install-flower.rst:27 +msgid "Using conda (or mamba)" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:2 -msgid "Use Differential Privacy" +#: ../../source/how-to-install-flower.rst:29 +msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-install-flower.rst:31 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. 
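Tying back to the :code:`evaluate` method described above, a custom strategy can implement server-side evaluation by overriding it. The sketch below is illustrative only: it deserializes the global parameters and computes a placeholder loss where a real implementation would score the model on server-held data::

    from typing import Dict, Optional, Tuple

    import flwr as fl
    from flwr.common import Parameters, Scalar


    class ServerSideEvalFedAvg(fl.server.strategy.FedAvg):
        """FedAvg variant with a toy server-side evaluation step."""

        def evaluate(
            self, server_round: int, parameters: Parameters
        ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
            # Deserialize the global model parameters received from the server
            ndarrays = fl.common.parameters_to_ndarrays(parameters)
            # Placeholder metric: in practice, load your centralized test set here
            loss = float(sum(float(abs(arr).mean()) for arr in ndarrays))
            return loss, {"server_round": server_round}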
If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following::" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-install-flower.rst:36 msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``::" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:12 -msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." +#: ../../source/how-to-install-flower.rst:40 +msgid "or with ``mamba``::" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:15 -msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +#: ../../source/how-to-install-flower.rst:46 +msgid "Verify installation" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-install-flower.rst:48 msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." -msgstr "" - -#: ../../source/how-to-use-differential-privacy.rst:21 -msgid "Server-side Clipping" +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:22 -msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +#: ../../source/how-to-install-flower.rst:58 +msgid "Advanced installation options" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "server side clipping" +#: ../../source/how-to-install-flower.rst:61 +msgid "Install via Docker" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:31 -msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." 
+#: ../../source/how-to-install-flower.rst:63 +msgid ":doc:`Run Flower using Docker `" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:52 -msgid "Client-side Clipping" +#: ../../source/how-to-install-flower.rst:66 +msgid "Install pre-release" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-install-flower.rst:68 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." -msgstr "" - -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "client side clipping" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens::" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-install-flower.rst:72 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra::" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:80 -msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +#: ../../source/how-to-install-flower.rst:77 +msgid "Install nightly release" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-install-flower.rst:79 msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases::" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" +#: ../../source/how-to-install-flower.rst:83 +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra::" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." -msgstr "" - -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" +"Flower allows you to monitor system resources while running your " +"simulation. 
Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-monitor-simulation.rst:6 msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" +#: ../../source/how-to-monitor-simulation.rst:10 +msgid "Downloads" msgstr "" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-monitor-simulation.rst:16 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-monitor-simulation.rst:18 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." msgstr "" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-monitor-simulation.rst:20 +msgid "If you are on an M1 Mac, it should be:" msgstr "" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" +#: ../../source/how-to-monitor-simulation.rst:27 +msgid "On the previous generation Intel Mac devices, it should be:" msgstr "" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" +#: ../../source/how-to-monitor-simulation.rst:34 +msgid "" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" msgstr "" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" +#: ../../source/how-to-monitor-simulation.rst:44 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" msgstr "" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-monitor-simulation.rst:59 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. 
Open those using one of the following " +"commands as before:" msgstr "" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-monitor-simulation.rst:69 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." msgstr "" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-monitor-simulation.rst:84 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." msgstr "" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" +#: ../../source/how-to-monitor-simulation.rst:88 +msgid "Tracking metrics" msgstr "" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-monitor-simulation.rst:90 msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." msgstr "" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-monitor-simulation.rst:97 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"Please include the following argument in your Python code when starting a" +" simulation." msgstr "" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "Now, you are ready to start your workload." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:110 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" msgstr "" -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" +#: ../../source/how-to-monitor-simulation.rst:117 +msgid "You can look at everything at ``_ ." msgstr "" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-monitor-simulation.rst:119 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." msgstr "" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. 
Read the `Implementing Strategies `_ guide to learn more." +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" -#: ../../source/index.rst:34 -msgid "Tutorial" +#: ../../source/how-to-monitor-simulation.rst:123 +msgid "" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port :code:`3000` on " +"your machine as long as they are running." msgstr "" -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "Resource allocation" msgstr "" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" +#: ../../source/how-to-monitor-simulation.rst:134 +msgid "" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." msgstr "" -#: ../../source/index.rst:99 -msgid "Legacy example guides" +#: ../../source/how-to-monitor-simulation.rst:136 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" +#: ../../source/how-to-monitor-simulation.rst:143 +msgid "In Google Colab, the result you see might be similar to this:" msgstr "" -#: None:-1 -msgid "API reference" +#: ../../source/how-to-monitor-simulation.rst:155 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" msgstr "" -#: ../../source/index.rst:137 -msgid "Reference docs" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "Let’s also specify the resource for a single client." msgstr "" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" +#: ../../source/how-to-monitor-simulation.rst:205 +msgid "" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" +#: ../../source/how-to-monitor-simulation.rst:207 +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " +"running two clients and therefore enable them to run concurrently. Be " +"careful not to require more resources than available. If you specified " +":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " +"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." msgstr "" -#: ../../source/index.rst:172 -msgid "Contributor explanations" +#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +msgid "FAQ" msgstr "" -#: ../../source/index.rst:178 -msgid "Contributor references" +#: ../../source/how-to-monitor-simulation.rst:214 +msgid "Q: I don't see any metrics logged." 
msgstr "" -#: ../../source/index.rst:-1 +#: ../../source/how-to-monitor-simulation.rst:216 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." -msgstr "" - -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." msgstr "" -#: ../../source/index.rst:7 +#: ../../source/how-to-monitor-simulation.rst:218 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" -#: ../../source/index.rst:11 -msgid "Join the Flower Community" +#: ../../source/how-to-monitor-simulation.rst:220 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" msgstr "" -#: ../../source/index.rst:13 +#: ../../source/how-to-monitor-simulation.rst:226 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"Q: I see \"This site can't be reached\" when going to " +"``_." msgstr "" -#: ../../source/index.rst:15 -msgid "Join us on Slack" +#: ../../source/how-to-monitor-simulation.rst:228 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." msgstr "" -#: ../../source/index.rst:23 -msgid "Flower Framework" +#: ../../source/how-to-monitor-simulation.rst:232 +msgid "Resources" msgstr "" -#: ../../source/index.rst:25 +#: ../../source/how-to-monitor-simulation.rst:234 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"Ray Dashboard: ``_" msgstr "" -#: ../../source/index.rst:30 -msgid "Tutorials" +#: ../../source/how-to-monitor-simulation.rst:236 +msgid "Ray Metrics: ``_" msgstr "" -#: ../../source/index.rst:32 -msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" msgstr "" -#: ../../source/index.rst:61 +#: ../../source/how-to-run-simulations.rst:8 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" -msgstr "" - -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. 
These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." msgstr "" -#: ../../source/index.rst:68 -msgid "And TensorFlow:" +#: ../../source/how-to-run-simulations.rst:10 +msgid "" +"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" +" clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +":code:`VirtualClientEngine` are:" msgstr "" -#: ../../source/index.rst:76 +#: ../../source/how-to-run-simulations.rst:12 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." msgstr "" -#: ../../source/index.rst:110 +#: ../../source/how-to-run-simulations.rst:13 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +"internals." msgstr "" -#: ../../source/index.rst:120 -msgid "References" +#: ../../source/how-to-run-simulations.rst:14 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." +#: ../../source/how-to-run-simulations.rst:16 +msgid "" +"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " +"of `Actors `_ to " +"spawn `virtual` clients and run their workload." msgstr "" -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" +#: ../../source/how-to-run-simulations.rst:20 +msgid "Launch your Flower simulation" msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." +#: ../../source/how-to-run-simulations.rst:22 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" -#: ../../source/index.rst:148 -msgid "Contributor docs" +#: ../../source/how-to-run-simulations.rst:44 +msgid "VirtualClientEngine resources" msgstr "" -#: ../../source/index.rst:150 +#: ../../source/how-to-run-simulations.rst:45 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." 
-msgstr "" - -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +":code:`ray_init_args` input argument to :code:`start_simulation` which " +"the VCE internally passes to Ray's :code:`ray.init` command. For a " +"complete list of settings you can configure check the `ray.init " +"`_" +" documentation. Do not set :code:`ray_init_args` if you want the VCE to " +"use all your system's CPUs and GPUs." msgstr "" -#: ../../source/ref-api-cli.rst:7 -msgid "flower-simulation" +#: ../../source/how-to-run-simulations.rst:62 +msgid "Assigning client resources" msgstr "" -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" +#: ../../source/how-to-run-simulations.rst:63 +msgid "" +"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" +" nothing else) to each virtual client. This means that if your system has" +" 10 cores, that many virtual clients can be concurrently running." msgstr "" -#: ../../source/ref-api-cli.rst:27 -msgid "flower-client-app" +#: ../../source/how-to-run-simulations.rst:65 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" msgstr "" -#: ../../source/ref-api-cli.rst:37 -msgid "flower-server-app" +#: ../../source/how-to-run-simulations.rst:67 +msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." msgstr "" -#: ../../source/ref-api/flwr.rst:2 -msgid "flwr" +#: ../../source/how-to-run-simulations.rst:68 +msgid "" +":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " +"assigned." msgstr "" -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 -msgid "Modules" +#: ../../source/how-to-run-simulations.rst:70 +msgid "Let's see a few examples:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" +#: ../../source/how-to-run-simulations.rst:89 +msgid "" +"While the :code:`client_resources` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +":code:`VirtualClientEngine` will schedule 100 jobs to run (each " +"simulating a client sampled by the strategy) and then will execute them " +"in a resource-aware manner in batches of 8." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." +#: ../../source/how-to-run-simulations.rst:91 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." 
msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" +#: ../../source/how-to-run-simulations.rst:94 +msgid "Simulation examples" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." +#: ../../source/how-to-run-simulations.rst:96 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" +#: ../../source/how-to-run-simulations.rst:98 +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." +#: ../../source/how-to-run-simulations.rst:99 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-run-simulations.rst:104 +msgid "Multi-node Flower simulations" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -msgid "Flower simulation." +#: ../../source/how-to-run-simulations.rst:106 +msgid "" +"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " +"across multiple compute nodes. Before starting your multi-node simulation" +" ensure that you:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" +#: ../../source/how-to-run-simulations.rst:108 +msgid "Have the same Python environment in all nodes." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -msgid "Functions" +#: ../../source/how-to-run-simulations.rst:109 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:110 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:111 +msgid "" +"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " +"`_ so the " +":code:`VirtualClientEngine` attaches to a running Ray instance." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -msgid "Run Flower client app." +#: ../../source/how-to-run-simulations.rst:112 +msgid "" +"Start Ray on you head node: on the terminal type :code:`ray start " +"--head`. This command will print a few lines, one of which indicates how " +"to attach other nodes to the head node." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_supernode `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:113 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +":code:`ray start --address='192.168.1.132:6379'`" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -msgid "Run Flower SuperNode." 
+#: ../../source/how-to-run-simulations.rst:115 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:117 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"Once your simulation is finished, if you'd like to dismantle your cluster," +" simply run the command :code:`ray stop` in each node's " +"terminal (including the head node)." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." +#: ../../source/how-to-run-simulations.rst:120 +msgid "Multi-node simulation good-to-know" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:122 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"Here are a few useful things to know when running multi-node FL " +"simulations:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." +#: ../../source/how-to-run-simulations.rst:124 +msgid "" +"Use :code:`ray status` to check all nodes connected to your head node as" +" well as the total resources available to the " +":code:`VirtualClientEngine`." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" +#: ../../source/how-to-run-simulations.rst:126 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible to the head node. This means that the " +":code:`VirtualClientEngine` can schedule as many `virtual` clients as " +"that node can possibly run. In some settings you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"`--num-cpus=` and/or `--num-" +"gpus=` in any :code:`ray start` command (including " +"when starting the head)." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:132 +msgid "Considerations for simulations" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." +#: ../../source/how-to-run-simulations.rst:135 +msgid "" +"We are actively working on these fronts to make it trivial to run any " +"FL workload with Flower simulation." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:138 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or training a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation."
msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -msgid "Flower ClientApp." +#: ../../source/how-to-run-simulations.rst:141 +msgid "GPU resources" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:143 +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." +#: ../../source/how-to-run-simulations.rst:146 +msgid "" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set :code:`num_gpus=0.5` and you have two GPUs in your system with " +"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" +" concurrently." msgstr "" -#: ../../source/ref-api/flwr.client.rst:52::1 -msgid ":py:obj:`flwr.client.mod `\\" +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of -msgid "Flower Built-in Mods." +#: ../../source/how-to-run-simulations.rst:149 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-run-simulations.rst:150 +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " +"experiment." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 
-#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" +#: ../../source/how-to-run-simulations.rst:153 +msgid "" +"In addition, the GPU resource limits passed to :code:`client_resources` " +"are not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:156 +msgid "TensorFlow with GPUs" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." +#: ../../source/how-to-run-simulations.rst:158 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:160 +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " +"in order to specify a function to be executed upon actor initialization. " +"In this case, to enable GPU growth for TF workloads. It would look as " +"follows:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." +#: ../../source/how-to-run-simulations.rst:179 +msgid "" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:183 +msgid "Multi-node setups" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -msgid "Get the run context from this client." +#: ../../source/how-to-run-simulations.rst:185 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. 
Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +msgid "" +"Model updates can be persisted on the server-side by customizing " +":code:`Strategy` methods. Implementing custom strategies is always an " +"option, but for many cases it may be more convenient to simply customize " +"an existing strategy. The following code example defines a new " +":code:`SaveModelStrategy` which customized the existing built-in " +":code:`FedAvg` strategy. In particular, it customizes " +":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " +"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" +" before it returns those aggregated weights to the caller (i.e., the " +"server):" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +msgid "Save and load PyTorch checkpoints" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. 
" +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." 
msgstr "" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" -msgstr "" - -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. 
Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 +#: ../../source/how-to-upgrade-to-flower-next.rst:43 +msgid "Install update" msgstr "" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." 
+"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "pip: add ``-U`` when installing." msgstr "" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" msgstr "" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." msgstr "" -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." -msgstr "" - -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." 
+"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-next.rst:100 +msgid "Required changes" msgstr "" -#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +msgid "The following breaking changes require manual updates." msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +msgid "General" msgstr "" -#: flwr.client.client_app.ClientApp:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" msgstr "" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" msgstr "" -#: flwr.client.client_app.ClientApp:21 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." 
+"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`train `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +msgid "Rename parameter/ndarray conversion functions:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +msgid "" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. 
It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "Custom strategies" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "Optional improvements" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. 
* " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 +#: ../../source/how-to-upgrade-to-flower-next.rst:317 +msgid "Further help" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +msgid "Upgrade to Flower Next" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:4 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-upgrade-to-flower-next.rst:9 msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:2 -msgid "mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:13 +msgid "Let's dive in!" 
msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:48 msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of -msgid "Client-side adaptive clipping modifier." +#: ../../source/how-to-upgrade-to-flower-next.rst:54 +msgid "or if you need Flower Next with simulation:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:61 msgid "" -":py:obj:`fixedclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of -msgid "Client-side fixed clipping modifier." +"Ensure you set the following version constraint in your " +"``requirements.txt``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:71 +msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." +#: ../../source/how-to-upgrade-to-flower-next.rst:82 +msgid "Using Poetry" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:84 msgid "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of -msgid "Handle incoming message and return results, following the SecAgg protocol." +#: ../../source/how-to-upgrade-to-flower-next.rst:86 +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:102 msgid "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of -msgid "" -"Handle incoming message and return results, following the SecAgg+ " -"protocol." +#: ../../source/how-to-upgrade-to-flower-next.rst:109 +msgid "|clientapp_link|_" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:110 msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\," -" ctxt\\, call\\_next\\)" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. 
Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of -msgid "Message size mod." +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +msgid "|serverapp_link|_" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:133 msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of -msgid "Parameters size mod." +#: ../../source/how-to-upgrade-to-flower-next.rst:154 +msgid "Deployment" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:155 msgid "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," -" sensitivity\\, ...\\)" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#: flwr.client.mod.localdp_mod.LocalDpMod:1 of -msgid "Modifier for local differential privacy." +#: ../../source/how-to-upgrade-to-flower-next.rst:158 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" msgstr "" -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 -msgid "LocalDpMod" -msgstr "" - -#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:174 msgid "" -"This mod clips the client model updates and adds noise to the params " -"before sending them to the server." +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 -#: flwr.client.mod.localdp_mod.LocalDpMod:6 of -msgid "It operates on messages of type `MessageType.TRAIN`." +#: ../../source/how-to-upgrade-to-flower-next.rst:201 +msgid "Simulation in CLI" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:8 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." +#: ../../source/how-to-upgrade-to-flower-next.rst:202 +msgid "" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:10 of -msgid "The sensitivity of the client model." +#: ../../source/how-to-upgrade-to-flower-next.rst:232 +msgid "" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +#: ../../source/how-to-upgrade-to-flower-next.rst:249 msgid "" -"The privacy budget. 
Smaller value of epsilon indicates a higher level of " -"privacy protection." +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +#: ../../source/how-to-upgrade-to-flower-next.rst:275 +msgid "Simulation in a Notebook" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:276 msgid "" -"The failure probability. The probability that the privacy mechanism fails" -" to provide the desired level of privacy. A smaller value of delta " -"indicates a stricter privacy guarantee." +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of -msgid "Create an instance of the local DP mod and add it to the client-side mods:" +#: ../../source/how-to-upgrade-to-flower-next.rst:319 +msgid "" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" -#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:325 +msgid "Important" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:328 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " -"wrapper." +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of -msgid "The wrapper sends the clipping_norm value to the client." +#: ../../source/how-to-upgrade-to-flower-next.rst:334 +msgid "Happy migrating! 🚀" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of -msgid "This mod clips the client model updates before sending them to the server." +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +#: ../../source/how-to-use-built-in-mods.rst:4 msgid "" -"It also sends KEY_NORM_BIT to the server for computing the new clipping " -"value." +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of -msgid "Notes" +#: ../../source/how-to-use-built-in-mods.rst:6 +msgid "" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. 
Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of -msgid "Consider the order of mods when using multiple." +#: ../../source/how-to-use-built-in-mods.rst:9 +msgid "What are Mods?" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of -msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +#: ../../source/how-to-use-built-in-mods.rst:11 +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 -msgid "fixedclipping\\_mod" +#: ../../source/how-to-use-built-in-mods.rst:18 +msgid "A typical mod function might look something like this:" msgstr "" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of -msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +#: ../../source/how-to-use-built-in-mods.rst:31 +msgid "Using Mods" msgstr "" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." +#: ../../source/how-to-use-built-in-mods.rst:33 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "1. Import the required mods" msgstr "" -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "First, import the built-in mod you intend to use:" msgstr "" -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." +#: ../../source/how-to-use-built-in-mods.rst:46 +msgid "2. Define your client function" msgstr "" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 -msgid "parameters\\_size\\_mod" +#: ../../source/how-to-use-built-in-mods.rst:48 +msgid "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" msgstr "" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of -msgid "" -"This mod logs the number of parameters transmitted in the message as well" -" as their size in bytes." +#: ../../source/how-to-use-built-in-mods.rst:57 +msgid "3. Create the ``ClientApp`` with mods" msgstr "" -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" +#: ../../source/how-to-use-built-in-mods.rst:59 +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. 
The order in which you provide the mods matters:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 -msgid "secaggplus\\_mod" +#: ../../source/how-to-use-built-in-mods.rst:72 +msgid "Order of execution" msgstr "" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" +#: ../../source/how-to-use-built-in-mods.rst:74 +msgid "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" msgstr "" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" +#: ../../source/how-to-use-built-in-mods.rst:76 +msgid "``example_mod_1`` (outermost mod)" msgstr "" -#: ../../source/ref-api/flwr.client.start_client.rst:2 -msgid "start\\_client" +#: ../../source/how-to-use-built-in-mods.rst:77 +msgid "``example_mod_2`` (next mod)" msgstr "" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-use-built-in-mods.rst:78 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" msgstr "" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" +#: ../../source/how-to-use-built-in-mods.rst:79 +msgid "``example_mod_2`` (on the way back)" msgstr "" -#: flwr.client.app.start_client:9 of -msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/how-to-use-built-in-mods.rst:82 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-use-built-in-mods.rst:87 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of -msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." +#: ../../source/how-to-use-built-in-mods.rst:89 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of -msgid "" -"Configure the transport layer. 
Allowed values: - 'grpc-bidi': gRPC, " "bidirectional streaming - 'grpc-rere': gRPC, request-response " "(experimental) - 'rest': HTTP (experimental)" +#: ../../source/how-to-use-differential-privacy.rst:2 msgid "Use Differential Privacy" msgstr "" -#: flwr.client.app.start_client:31 of +#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-use-differential-privacy.rst:7 msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" to contact us to discuss your requirements and to receive guidance on " +"how to best use these features." msgstr "" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" +#: ../../source/how-to-use-differential-privacy.rst:12 msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, the " +"Flower framework has made it possible to decide whether to perform " +"clipping on the server side or the client side." msgstr "" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" +#: ../../source/how-to-use-differential-privacy.rst:15 msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" +#: ../../source/how-to-use-differential-privacy.rst:16 msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -msgid "start\\_numpy\\_client" +#: ../../source/how-to-use-differential-privacy.rst:21 msgid "Server-side Clipping" msgstr "" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-use-differential-privacy.rst:22 msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." 
+"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." msgstr "" -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" msgstr "" -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +#: ../../source/how-to-use-differential-privacy.rst:52 +msgid "Client-side Clipping" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -msgid "Create Array from NumPy ndarray." +#: ../../source/how-to-use-differential-privacy.rst:53 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." +#: ../../source/how-to-use-differential-privacy.rst:63 +msgid "" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:80 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +#: ../../source/how-to-use-differential-privacy.rst:104 +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:122 msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/how-to-use-differential-privacy.rst:126 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." 
+#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +#: ../../source/how-to-use-strategies.rst:10 +#: ../../source/how-to-use-strategies.rst:87 +msgid "Implement a novel strategy" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." +#: ../../source/how-to-use-strategies.rst:14 +msgid "Use an existing strategy" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:16 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." +#: ../../source/how-to-use-strategies.rst:25 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the :code:`start_server` function. It is usually " +"recommended to adjust a few parameters during instantiation:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:42 msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/how-to-use-strategies.rst:45 +msgid "Configuring client fit and client evaluate" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/how-to-use-strategies.rst:47 +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to :code:`on_fit_config_fn`. The provided function " +"will be called by the strategy and must return a dictionary of " +"configuration key-value pairs that will be sent to the client. The " +"returned dictionary is passed to the :code:`client.fit` and " +":code:`client.evaluate` functions during each round of federated " +"learning." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." +#: ../../source/how-to-use-strategies.rst:75 +msgid "" +"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the :code:`on_fit_config_fn` in its own " +":code:`client.fit()` function." 
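A minimal sketch of the :code:`on_fit_config_fn` callback described above; the configuration keys and values are arbitrary examples:

    # Sketch: sending per-round configuration to clients via on_fit_config_fn.
    from flwr.server.strategy import FedAvg

    def fit_config(server_round: int) -> dict:
        # Values may change from round to round, e.g. to decay the learning rate
        return {
            "server_round": server_round,
            "local_epochs": 1,
            "lr": 0.01 * (0.9 ** (server_round - 1)),
        }

    strategy = FedAvg(on_fit_config_fn=fit_config)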
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:78 msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"Similar to :code:`on_fit_config_fn`, there is also " +":code:`on_evaluate_config_fn` to customize the configuration sent to " +":code:`client.evaluate()`" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "Configs record." +#: ../../source/how-to-use-strategies.rst:81 +msgid "Configuring server-side evaluation" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/how-to-use-strategies.rst:83 +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to :code:`evaluate_fn`." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." +#: ../../source/how-to-use-strategies.rst:89 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/index.rst:34 +msgid "Tutorial" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +#: ../../source/index.rst:75 ../../source/index.rst:79 +msgid "How-to guides" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." +#: ../../source/index.rst:100 +msgid "Legacy example guides" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +#: ../../source/index.rst:108 ../../source/index.rst:112 +msgid "Explanations" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." +#: None:-1 +msgid "API reference" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/index.rst:138 +msgid "Reference docs" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." +#: ../../source/index.rst:154 +msgid "Contributor tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: ../../source/index.rst:161 +msgid "Contributor how-to guides" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." +#: ../../source/index.rst:173 +msgid "Contributor explanations" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." 
+#: ../../source/index.rst:179 +msgid "Contributor references" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." +#: ../../source/index.rst:11 +msgid "Join the Flower Community" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:13 msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/index.rst:15 +msgid "Join us on Slack" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." +#: ../../source/index.rst:23 +msgid "Flower Framework" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:25 msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." +#: ../../source/index.rst:30 +msgid "Tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:32 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." +"A learning-oriented series of federated learning tutorials, the best " +"place to start." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" +#: ../../source/index.rst:62 +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." 
+#: ../../source/index.rst:64 +msgid "We also made video tutorials for PyTorch:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +#: ../../source/index.rst:69 +msgid "And TensorFlow:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." +#: ../../source/index.rst:77 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:110 msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." +#: ../../source/index.rst:121 +msgid "References" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +#: ../../source/index.rst:123 +msgid "Information-oriented API reference and other reference material." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." +#: ../../source/index.rst:132::1 +msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" +#: ../../source/index.rst:132::1 flwr:1 of +msgid "Flower main package." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +#: ../../source/index.rst:149 +msgid "Contributor docs" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:151 msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +#: ../../source/ref-api-cli.rst:7 +msgid "flwr CLI" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "Parameters record." +#: ../../flwr:1 +msgid "flwr is the Flower command line interface." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: ../../source/ref-api-cli.rst +msgid "Options" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." +#: ../../flwr:1 +msgid "Install completion for the current shell." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../flwr:1 msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." 
+#: ../../flwr build:1 msgid "Build a Flower App into a Flower App Bundle (FAB)." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../flwr build:1 msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." +#: ../../flwr install:1 +msgid "Install a Flower App Bundle." msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" +#: ../../flwr install:1 +msgid "It can be run with a single FAB file argument:" msgstr "" -#: flwr.common.record.parametersrecord.Array:3 of -msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" msgstr "" -#: flwr.common.record.parametersrecord.Array:6 of -msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of -msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" msgstr "" -#: flwr.common.record.parametersrecord.Array:12 of +#: ../../flwr install:1 msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" msgstr "" -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -msgid ":py:obj:`numpy `\\ \\(\\)" +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of -msgid "Return the array as a NumPy array." +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" +#: ../../flwr install:1 +msgid "The desired install path." 
msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`shape `\\" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "Argumento de compilação" + +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "Argumento de compilação" + +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`stype `\\" +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" +#: ../../flwr log +msgid "default" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -msgid "ClientMessage" +#: ../../flwr log:1 +msgid "``True``" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "Argumento de compilação" + +#: ../../flwr new:1 +msgid "Create new Flower App." msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" +#: ../../flwr new:1 +msgid "The ML framework to use" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid "" -":py:obj:`get_parameters_res " -"`\\" +#: ../../flwr new +msgid "options" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../flwr new:1 msgid "" -":py:obj:`get_properties_res " -"`\\" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" +#: ../../flwr new:1 +msgid "The Flower username of the author" msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" +#: ../../flwr run:1 +msgid "Run Flower App." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../flwr run:1 msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../flwr run:1 msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." 
msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: ../../source/ref-api-cli.rst:16 +msgid "flower-simulation" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" msgstr "" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 -msgid "ConfigsRecord" +#: ../../source/ref-api-cli.rst:36 +msgid "flower-supernode" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +#: ../../source/ref-api-cli.rst:46 +msgid "flower-server-app" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api-cli.rst:49 +msgid "" +"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " +"longer supports passing a reference to a `ServerApp` attribute. Instead, " +"you need to pass the path to Flower app via the argument :code:`--app`. " +"This is the path to a directory containing a `pyproject.toml`. You can " +"create a valid Flower app by executing :code:`flwr new` and following the" +" prompt." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." +#: ../../source/ref-api-cli.rst:62 +msgid "flower-superexec" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.client `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.common `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.server `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.simulation `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" -#: flwr.common.context.Context:3 of -msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" -msgstr "" - -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -msgid ":py:obj:`state `\\" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" msgstr "" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." 
+#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" msgstr "" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" msgstr "" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of +msgid "Flower ClientApp." msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -msgid "EvaluateIns" +#: ../../source/ref-api/flwr.client.rst:50::1 +msgid ":py:obj:`flwr.client.mod `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +msgid "Flower Built-in Mods." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: 
../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of -msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." 
+#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`capitalize `\\ \\(\\)" +#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." 
+#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`title `\\ \\(\\)" +#: flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.evaluate:8 of msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." 
+#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.fit:3 of msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.fit:8 of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.get_parameters:3 of msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.get_properties:3 of msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "" + +#: flwr.client.client_app.ClientApp:5 of msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:16 of msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." 
+#: flwr.client.client_app.ClientApp:21 of
+msgid ""
+"In this `client:app` example, `client` refers to the Python module "
+"`client.py` in which the previous code lives and `app` refers to the "
+"global attribute `app` that points to an object of type `ClientApp`."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`lower `\\ \\(\\)"
+#: flwr.client.client_app.ClientApp.evaluate:1::1 of
+msgid ":py:obj:`evaluate `\\ \\(\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.lower:1 of
-msgid "Return a copy of the string converted to lowercase."
+#: flwr.client.client_app.ClientApp.evaluate:1
+#: flwr.client.client_app.ClientApp.evaluate:1::1 of
+msgid "Return a decorator that registers the evaluate fn with the client app."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)"
+#: flwr.client.client_app.ClientApp.evaluate:1::1 of
+msgid ":py:obj:`query `\\ \\(\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.lstrip:1 of
-msgid "Return a copy of the string with leading whitespace removed."
+#: flwr.client.client_app.ClientApp.evaluate:1::1
+#: flwr.client.client_app.ClientApp.query:1 of
+msgid "Return a decorator that registers the query fn with the client app."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ""
-":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, "
-"end\\]\\]\\)"
+#: flwr.client.client_app.ClientApp.evaluate:1::1 of
+msgid ":py:obj:`train `\\ \\(\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ""
-"Return the highest index in S where substring sub is found, such that sub"
-" is contained within S[start:end]."
+#: flwr.client.client_app.ClientApp.evaluate:1::1
+#: flwr.client.client_app.ClientApp.train:1 of
+msgid "Return a decorator that registers the train fn with the client app."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ""
-":py:obj:`rindex `\\ \\(sub\\[\\, "
-"start\\[\\, end\\]\\]\\)"
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:2
+msgid "NumPyClient"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
msgid ""
-":py:obj:`rjust `\\ \\(width\\[\\, "
-"fillchar\\]\\)"
-msgstr ""
-
-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.rjust:1 of
-msgid "Return a right-justified string of length width."
+":py:obj:`evaluate `\\ \\(parameters\\, "
+"config\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)"
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.rstrip:1 of
-msgid "Return a copy of the string with trailing whitespace removed."
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+#: flwr.client.numpy_client.NumPyClient.fit:1 of
+msgid "Train the provided parameters using the locally held dataset."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)"
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+msgid ":py:obj:`get_context `\\ \\(\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
msgid ""
-":py:obj:`splitlines `\\ "
-"\\(\\[keepends\\]\\)"
+":py:obj:`get_parameters `\\ "
+"\\(config\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.splitlines:1 of
-msgid "Return a list of the lines in the string, breaking at line boundaries."
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+msgid ""
+":py:obj:`get_properties `\\ "
+"\\(config\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)"
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+#: flwr.client.numpy_client.NumPyClient.get_properties:1 of
+msgid "Return a client's set of properties."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.strip:1 of
-msgid "Return a copy of the string with leading and trailing whitespace removed."
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+msgid ""
+":py:obj:`set_context `\\ "
+"\\(context\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`swapcase `\\ \\(\\)"
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+msgid ":py:obj:`to_client `\\ \\(\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.swapcase:1 of
-msgid ""
-"Convert uppercase characters to lowercase and lowercase characters to "
-"uppercase."
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1
+#: flwr.client.numpy_client.NumPyClient.to_client:1 of
+msgid "Convert the object to Client type and return it."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)"
+#: flwr.client.NumPyClient.context:1::1 of
+msgid ":py:obj:`context `\\"
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.translate:1 of
-msgid "Replace each character in the string using the given translation table."
+#: flwr.client.numpy_client.NumPyClient.evaluate:3
+#: flwr.client.numpy_client.NumPyClient.fit:3
+#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5
+#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8
+#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5
+#: flwr.server.strategy.strategy.Strategy.configure_fit:5
+#: flwr.server.strategy.strategy.Strategy.evaluate:8 of
+msgid "The current (global) model parameters."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-msgid ":py:obj:`upper `\\ \\(\\)"
+#: flwr.client.numpy_client.NumPyClient.evaluate:5 of
+msgid ""
+"Configuration parameters which allow the server to influence evaluation "
+"on the client. It can be used to communicate arbitrary values from the "
+"server to the client, for example, to influence the number of examples "
+"used for evaluation."
msgstr ""

-#: ../../source/ref-api/flwr.common.EventType.rst:163::1
-#: flwr.common.EventType.upper:1 of
-msgid "Return a copy of the string converted to uppercase."
+#: flwr.client.numpy_client.NumPyClient.evaluate:11 of
+msgid ""
+"* **loss** (*float*) -- The evaluation loss of the model on the local "
+"dataset. 
* **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isascii `\\ \\(\\)" +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`islower `\\ \\(\\)" +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isupper `\\ \\(\\)" +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`istitle `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +msgid "Client-side adaptive clipping modifier." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isspace `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +msgid "Client-side fixed clipping modifier." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdecimal `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdigit `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isnumeric `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +msgid "Parameters size mod." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalpha `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalnum `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +msgid "" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isidentifier `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +msgid "Modifier for local differential privacy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isprintable `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +msgid "" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of -msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`format_map `\\ \\(mapping\\)" +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`maketrans `\\" +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_ENTER `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +msgid "" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." 
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ":py:obj:`START_SERVER_ENTER `\\"
+#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15
+#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13
+#: flwr.server.driver.driver.Driver.send_and_receive:18
+#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54
+#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61
+#: of
+msgid "Notes"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ":py:obj:`START_SERVER_LEAVE `\\"
+#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16
+#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of
+msgid "Consider the order of mods when using multiple."
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ""
-":py:obj:`RUN_DRIVER_API_ENTER "
-"`\\"
+#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of
+msgid "Typically, adaptiveclipping_mod should be the last to operate on params."
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ""
-":py:obj:`RUN_DRIVER_API_LEAVE "
-"`\\"
+#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2
+msgid "fixedclipping\\_mod"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
+#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of
msgid ""
-":py:obj:`RUN_FLEET_API_ENTER "
-"`\\"
+"This mod needs to be used with the "
+"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper."
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ""
-":py:obj:`RUN_FLEET_API_LEAVE "
-"`\\"
+#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of
+msgid "Typically, fixedclipping_mod should be the last to operate on params."
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ""
-":py:obj:`RUN_SUPERLINK_ENTER "
-"`\\"
+#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2
+msgid "make\\_ffn"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ""
-":py:obj:`RUN_SUPERLINK_LEAVE "
-"`\\"
+#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2
+msgid "message\\_size\\_mod"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ""
-":py:obj:`START_SIMULATION_ENTER "
-"`\\"
+#: flwr.client.mod.comms_mods.message_size_mod:3 of
+msgid "This mod logs the size in bytes of the message being transmitted."
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ""
-":py:obj:`START_SIMULATION_LEAVE "
-"`\\"
+#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2
+msgid "parameters\\_size\\_mod"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ":py:obj:`DRIVER_CONNECT `\\"
+#: flwr.client.mod.comms_mods.parameters_size_mod:3 of
+msgid ""
+"This mod logs the number of parameters transmitted in the message as well"
+" as their size in bytes."
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ":py:obj:`DRIVER_DISCONNECT `\\"
+#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2
+msgid "secagg\\_mod"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ":py:obj:`START_DRIVER_ENTER `\\"
+#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2
+msgid "secaggplus\\_mod"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
-msgid ":py:obj:`START_DRIVER_LEAVE `\\"
+#: ../../source/ref-api/flwr.client.start_client.rst:2
+msgid "start\\_client"
msgstr ""

-#: flwr.common.EventType.capitalize:1::1 of
+#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of
msgid ""
-":py:obj:`RUN_CLIENT_APP_ENTER "
-"`\\"
+"The IPv4 or IPv6 address of the server. 
If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:9 of msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:31 of msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." msgstr "" -#: flwr.common.EventType.capitalize:3 of +#: flwr.client.app.start_client:35 of msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." msgstr "" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.common.EventType.count:1 of -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." 
+#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.common.EventType.encode:3 of -msgid "encoding" +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." msgstr "" -#: flwr.common.EventType.encode:6 of -msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." msgstr "" -#: flwr.common.EventType.endswith:1 of -msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" msgstr "" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." msgstr "" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: flwr.common.EventType.format:1 of -msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." msgstr "" -#: flwr.common.EventType.format_map:1 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." 
+#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." msgstr "" -#: flwr.common.EventType.isalnum:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -#: flwr.common.EventType.isalpha:3 of -msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: flwr.common.EventType.isascii:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.common.EventType.isdecimal:3 of -msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." msgstr "" -#: flwr.common.EventType.isdigit:3 of -msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.EventType.isidentifier:3 of -msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." msgstr "" -#: flwr.common.EventType.islower:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.common.EventType.isnumeric:3 of -msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." msgstr "" -#: flwr.common.EventType.isprintable:3 of -msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.isspace:3 of -msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." 
msgstr "" -#: flwr.common.EventType.istitle:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -#: flwr.common.EventType.isupper:3 of -msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." msgstr "" -#: flwr.common.EventType.join:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" msgstr "" -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: flwr.common.EventType.maketrans:3 of -msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." msgstr "" -#: flwr.common.EventType.partition:3 of -msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: flwr.common.EventType.partition:7 of -msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." msgstr "" -#: flwr.common.EventType.removeprefix:3 of -msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Config `\\" msgstr "" -#: flwr.common.EventType.removesuffix:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." 
+"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" msgstr "" -#: flwr.common.EventType.replace:5 of -msgid "count" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: flwr.common.EventType.replace:4 of -msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." msgstr "" -#: flwr.common.EventType.replace:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +msgid "Context of your run." msgstr "" -#: flwr.common.EventType.rpartition:3 of -msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: flwr.common.EventType.rpartition:7 of -msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." msgstr "" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." msgstr "" -#: flwr.common.EventType.split:13 of -msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using " -"the regular expression module." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: flwr.common.EventType.splitlines:3 of -msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." msgstr "" -#: flwr.common.EventType.startswith:1 of -msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.EventType.title:3 of -msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." -msgstr "" - -#: flwr.common.EventType.translate:5 of -msgid "table" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." msgstr "" -#: flwr.common.EventType.translate:4 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.EventType.translate:7 of -msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." msgstr "" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." 
msgstr ""

-#: ../../source/ref-api/flwr.common.FitRes.rst:31::1
-msgid ":py:obj:`parameters `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`GetPropertiesRes `\\ \\(status\\, "
+"properties\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.FitRes.rst:31::1
-msgid ":py:obj:`num_examples `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.GetPropertiesRes:1 of
+msgid "Properties response from a client."
msgstr ""

-#: ../../source/ref-api/flwr.common.FitRes.rst:31::1
-msgid ":py:obj:`metrics `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`Message `\\ \\(metadata\\[\\, content\\, "
+"error\\]\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2
-msgid "GetParametersIns"
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.message.Message:1 of
+msgid "State of your application from the viewpoint of the entity using it."
msgstr ""

-#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1
-msgid ":py:obj:`config `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`MessageType `\\ \\(\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2
-msgid "GetParametersRes"
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.constant.MessageType:1 of
+msgid "Message type."
msgstr ""

-#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1
-msgid ":py:obj:`status `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1
-msgid ":py:obj:`parameters `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.constant.MessageTypeLegacy:1 of
+msgid "Legacy message type."
msgstr ""

-#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2
-msgid "GetPropertiesIns"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`Metadata `\\ \\(run\\_id\\, "
+"message\\_id\\, src\\_node\\_id\\, ...\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1
-msgid ":py:obj:`config `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.message.Metadata:1 of
+msgid "A dataclass holding metadata associated with the current message."
msgstr ""

-#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2
-msgid "GetPropertiesRes"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Metrics `\\"
msgstr ""

-#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1
-msgid ":py:obj:`status `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`MetricsRecord `\\ "
+"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)"
msgstr ""

-#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1
-msgid ":py:obj:`properties `\\"
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.record.metricsrecord.MetricsRecord:1 of
+msgid "Metrics record."
msgstr ""

-#: ../../source/ref-api/flwr.common.Message.rst:2
-msgid "Message"
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`NDArray `\\"
msgstr ""

-#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1
-#: flwr.common.message.Message:3 of
-msgid "A dataclass including information about the message to be executed."
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, "
+":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]"
msgstr ""

-#: flwr.common.message.Message:5 of
-msgid ""
-"Holds records either sent by another entity (e.g. 
sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArrays `\\" msgstr "" -#: flwr.common.message.Message:8 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Properties `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of -msgid "The content of this message." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.common.record.parametersrecord.Array:3 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.common.record.parametersrecord.Array:6 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" msgstr "" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." msgstr "" -#: flwr.common.message.Message.create_reply:3 of +#: flwr.common.record.parametersrecord.Array:12 of msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." msgstr "" -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." 
msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PARAMETERS `\\" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PROPERTIES `\\" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." -msgstr "" - -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" msgstr "" -#: flwr.common.message.Metadata:13 of -msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.common.message.Metadata:21 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." 
+":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`created_at `\\" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" msgstr "" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" +#: ../../source/ref-api/flwr.common.Config.rst:2 +msgid "Config" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." 
msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of -msgid "Time-to-live for this message." +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " +":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 -msgid "ParametersRecord" +#: flwr.common.context.Context:3 of +msgid "The ID that identifies the node." msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: flwr.common.context.Context:5 of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: flwr.common.context.Context:8 of msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. 
across multiple rounds)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`node_id `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`node_config `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`state `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`run_config `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of -msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -msgid "ReconnectIns" +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" msgstr "" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`parameters_records `\\" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." 
+#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 -msgid "ServerMessage" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid "" -":py:obj:`get_parameters_ins " -"`\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid "" -":py:obj:`get_properties_ins " -"`\\" +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:2 -msgid "Status" +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -msgid "configure" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" -msgstr "" - -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." msgstr "" -#: logging.Logger.log:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." 
msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`capitalize `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`casefold `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`title `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of -msgid "Run Flower server app." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of -msgid "Run Flower SuperLink (Driver API and Fleet API)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`Driver `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lower `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of -msgid "Flower ServerApp." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of -msgid "Flower server config." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.workflow `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of -msgid "Workflows." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -msgid "ClientManager" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`num_available `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`swapcase `\\ \\(\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`unregister `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`upper `\\ \\(\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." -msgstr "" - -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -msgid "Driver" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isascii `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`islower `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isupper `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`istitle `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of -msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of -msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isspace `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdecimal `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of -msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of -msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdigit `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of -msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of -msgid "An iterable of message IDs for which reply messages are to be retrieved." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isnumeric `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of -msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalpha `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." 
msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of -msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalnum `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of -msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of -msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isidentifier `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "**replies** -- An iterable of reply messages received from the SuperLink." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isprintable `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of +msgid "" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`format_map `\\ \\(mapping\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`maketrans `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`config `\\" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`strategy `\\" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`history `\\" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`state `\\" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: 
flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of -msgid "Replace server strategy." +#: flwr.common.EventType.count:1 of +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." 
msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" +#: flwr.common.EventType.encode:3 of +msgid "encoding" msgstr "" -#: flwr.server.server_app.ServerApp:5 of -msgid "Use the `ServerApp` with an existing `Strategy`:" +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." msgstr "" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: flwr.common.EventType.encode:9 of +msgid "errors" msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid ":py:obj:`main `\\ \\(\\)" +#: flwr.common.EventType.encode:6 of +msgid "" +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." msgstr "" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: flwr.common.EventType.endswith:1 of +msgid "" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -msgid "ServerConfig" +#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." msgstr "" -#: flwr.server.server_config.ServerConfig:3 of +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." -msgstr "" - -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: flwr.common.EventType.format:1 of +msgid "" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isalnum:3 of msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isalpha:3 of msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isascii:3 of msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isdecimal:3 of msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.isdigit:3 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: flwr.common.EventType.isidentifier:3 of msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: flwr.common.EventType.isprintable:3 of +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: flwr.common.EventType.istitle:3 of +msgid "" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" +#: flwr.common.EventType.join:3 of +msgid "" +"The string whose method is called is inserted in between each given " +"string. 
The result is returned as a new string." msgstr "" -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.common.EventType.maketrans:3 of msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." -msgstr "" - -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.common.EventType.partition:3 of msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.common.EventType.partition:7 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.common.EventType.removeprefix:3 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.common.EventType.removesuffix:3 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." 
msgstr "" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" +#: flwr.common.EventType.replace:5 of +msgid "count" msgstr "" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." +#: flwr.common.EventType.replace:4 of +msgid "" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." msgstr "" -#: flwr.server.app.start_server:33 of -msgid "server certificate." +#: flwr.common.EventType.replace:7 of +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." msgstr "" -#: flwr.server.app.start_server:34 of -msgid "server private key." +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." msgstr "" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" +#: flwr.common.EventType.rpartition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." msgstr "" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.split:13 of msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.startswith:1 of msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." +#: flwr.common.EventType.title:3 of +msgid "" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +#: flwr.common.EventType.translate:5 of +msgid "table" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." +#: flwr.common.EventType.translate:4 of +msgid "" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.translate:7 of msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." 
+#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -msgid "Federated Optim strategy." 
+#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "" + +#: flwr.common.message.Message:5 of msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of +msgid "The content of this message." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid ":py:obj:`Strategy `\\ \\(\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. 
Defaults to None." +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:27 of -msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.message.Metadata:13 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"An identifier for grouping messages. 
In some settings, this is used as " +"the FL round." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`created_at `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +msgid "Time-to-live for this message." 
msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +msgid "Metrics" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." +#: flwr.common.record.metricsrecord.MetricsRecord:3 of +msgid "" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: flwr.common.record.metricsrecord.MetricsRecord:12 of +msgid "" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." +#: flwr.common.record.metricsrecord.MetricsRecord:28 of +msgid "" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:39 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of -msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +msgid "NDArrays" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of -msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. 
ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +msgid "Let's see some examples:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." +#: flwr.common.record.parametersrecord.ParametersRecord:50 of +msgid "" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." 
+"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: flwr.common.record.parametersrecord.ParametersRecord:83 of +msgid "" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of -msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of -msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -msgid "Create a strategy:" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -msgid "Aggregate training results and update clip norms." 
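For the "finer control" path mentioned in the `ParametersRecord` docstring, constructing an `Array` directly might look roughly like this. This is only a sketch: the hand-rolled serialization, the `stype` label, and the key name are placeholders, and the `dtype`/`shape`/`stype`/`data` field layout is assumed from the API reference in this diff.

```python
import numpy as np
from flwr.common import Array, ParametersRecord

ndarray = np.arange(6, dtype=np.float32).reshape(2, 3)

# Hand-rolled serialization: raw bytes of the array buffer
buffer = ndarray.tobytes()

arr = Array(
    dtype=str(ndarray.dtype),   # e.g. "float32"
    shape=list(ndarray.shape),  # e.g. [2, 3]
    stype="numpy.tobytes",      # free-form label describing how `data` was produced
    data=buffer,
)

p_record = ParametersRecord()
p_record["custom_array"] = arr

# Deserialization must mirror whatever the chosen stype means
restored = np.frombuffer(p_record["custom_array"].data, dtype=np.float32).reshape(2, 3)
```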
+#: ../../source/ref-api/flwr.common.Properties.rst:2 +msgid "Properties" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -msgid "DifferentialPrivacyClientSideFixedClipping" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: flwr.common.record.recordset.RecordSet:9 of +msgid "" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: flwr.common.record.recordset.RecordSet:29 of +msgid "Let's see an example." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of -msgid "Add noise to the aggregated parameters." +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of -msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 -msgid "DifferentialPrivacyServerSideFixedClipping" +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of -msgid "" -"Wrap the strategy with the 
DifferentialPrivacyServerSideFixedClipping " -"wrapper" +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: logging.Logger.log:3 of msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." 
+#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:22::1 msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`Driver `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 -msgid "FedAdagrad" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +msgid "Flower server config." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +msgid "Workflows." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid ":py:obj:`run `\\" +msgstr "" + +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "Run information." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:6 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The content for the new message. This holds records that are to be sent " +"to the destination node." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:9 of msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:14 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:17 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.create_message:23 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**message** -- A new `Message` instance with the specified content and " +"metadata." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.pull_messages:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.push_messages:3 of msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.driver.driver.Driver.send_and_receive:9 of msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of -msgid "Convert parameters object to NumPy weights." +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`strategy `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`history `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`node_id `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`node_config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`state `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`run_config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." 
+#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. 
A proximal term needs to be added to the loss " -"function during the training:" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of -msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of -msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of -msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of -msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +msgid "ServerAppComponents" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." 
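The `ServerAppComponents` description above is easiest to read next to a minimal `ServerApp` definition. A sketch, assuming the `server_fn`-style constructor that returns `ServerAppComponents`; the strategy settings and round count are arbitrary:

```python
from flwr.common import Context
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg


def server_fn(context: Context) -> ServerAppComponents:
    # Any component left unset falls back to its default
    # (FedAvg strategy, SimpleClientManager, a freshly created Server).
    strategy = FedAvg(fraction_fit=0.5, min_available_clients=2)
    config = ServerConfig(num_rounds=3, round_timeout=600.0)
    return ServerAppComponents(strategy=strategy, config=config)


app = ServerApp(server_fn=server_fn)
```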
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`client_manager " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`server `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`strategy `\\" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: flwr.server.server_config.ServerConfig:3 of +msgid "" +"All attributes have default values which allows users to configure just " +"the ones they care about." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." 
+#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. 
Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.app.start_server:12 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.app.start_server:21 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: flwr.server.app.start_server:32 of +msgid "CA certificate." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.app.start_server:33 of +msgid "server certificate." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +#: flwr.server.app.start_server:34 of +msgid "server private key." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." msgstr "" -#: flwr.server.strategy.krum.Krum:17 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." 
+#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." 
+#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." 
+#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of -msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of -msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of -msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 -msgid "workflow" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "The workflow for the SecAgg+ protocol." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -msgid "DefaultWorkflow" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -msgid "SecAggPlusWorkflow" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. 
This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of -msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of -msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." 
+":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of -msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of -msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of -msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of -msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of -msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +msgid "Aggregate evaluation losses using the given strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of -msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -msgid "Too large `max_weight` may compromise the precision of the quantization." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of -msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -msgid "Execute the 'setup' stage." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +msgid "" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. 
If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -msgid "Execute the 'share keys' stage." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of -msgid "Execute the 'unmask' stage." +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -msgid "SecAggWorkflow" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. 
Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." 
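The `configure_fit`/`configure_evaluate` docstrings above describe the return contract: a list of `(ClientProxy, FitIns)` or `(ClientProxy, EvaluateIns)` pairs, where any connected client left out of the list skips the round. A minimal sketch of that contract follows; the sampling sizes and config keys are illustrative assumptions, not part of the documented API.

```python
from typing import List, Tuple

from flwr.common import FitIns, Parameters
from flwr.server.client_manager import ClientManager
from flwr.server.client_proxy import ClientProxy


def configure_fit_sketch(
    server_round: int, parameters: Parameters, client_manager: ClientManager
) -> List[Tuple[ClientProxy, FitIns]]:
    """Pair each sampled client with the instructions for this round."""
    # Sample a subset of the currently connected clients (sizes are illustrative).
    clients = client_manager.sample(num_clients=10, min_num_clients=2)
    # Every selected client receives the same global parameters and round config.
    fit_ins = FitIns(parameters, {"server_round": server_round, "local_epochs": 1})
    # Any connected client missing from this list simply sits out the round.
    return [(client, fit_ins) for client in clients]
```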
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 #: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 #: of msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 #: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 #: of -msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +msgid "The noise multiplier for the Gaussian mechanism for model updates." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" -msgstr "" - -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -msgid "run\\_simulation" -msgstr "" - -#: flwr.simulation.run_simulation.run_simulation:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of -msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." -msgstr "" - -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of -msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. 
" -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. 
To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: flwr.simulation.app.start_simulation:25 of -msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" msgstr "" -#: flwr.simulation.app.start_simulation:31 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" msgstr "" -#: flwr.simulation.app.start_simulation:45 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." 
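The removed `run_simulation` and `start_simulation` docstrings above mention a `ServerApp`, a `ClientApp`, the number of SuperNodes, and a `backend_config` dictionary. A minimal end-to-end sketch under the assumption that `num_supernodes` and `backend_config` are the accepted keyword names and that `client_resources` with `num_cpus`/`num_gpus` is a valid backend-config key (both taken from Flower's simulation documentation, not from this diff):

```python
from flwr.client import ClientApp, NumPyClient
from flwr.server import ServerApp, ServerConfig
from flwr.server.strategy import FedAvg
from flwr.simulation import run_simulation


class TrivialClient(NumPyClient):
    """Placeholder client that trains nothing and reports a single example."""

    def get_parameters(self, config):
        return []

    def fit(self, parameters, config):
        return parameters, 1, {}

    def evaluate(self, parameters, config):
        return 0.0, 1, {}


def client_fn(cid: str):
    return TrivialClient().to_client()


client_app = ClientApp(client_fn=client_fn)
server_app = ServerApp(config=ServerConfig(num_rounds=3), strategy=FedAvg())

run_simulation(
    server_app=server_app,
    client_app=client_app,
    num_supernodes=10,  # virtual SuperNodes, each able to run the ClientApp
    backend_config={"client_resources": {"num_cpus": 1, "num_gpus": 0.0}},
)
```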
+":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.app.start_simulation:50 of -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "v1.9.0 (2024-06-10)" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. 
Andrew et al. recommends to set to " +"`expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:9 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:13 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:15 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. 
With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:17 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:21 -msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" msgstr "" -#: ../../source/ref-changelog.md:23 -msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:25 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. 
This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:29 -msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." msgstr "" -#: ../../source/ref-changelog.md:31 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:35 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. 
This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:39 -msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." msgstr "" -#: ../../source/ref-changelog.md:41 -msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" msgstr "" -#: ../../source/ref-changelog.md:43 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:47 -msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." msgstr "" -#: ../../source/ref-changelog.md:49 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:51 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:55 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:57 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:61 -msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" msgstr "" -#: ../../source/ref-changelog.md:63 -msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -#: ../../source/ref-changelog.md:65 -msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" msgstr "" -#: ../../source/ref-changelog.md:67 -msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." 
msgstr "" -#: ../../source/ref-changelog.md:69 -msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." msgstr "" -#: ../../source/ref-changelog.md:71 -msgid "As always, Flower code examples have received many updates." +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
msgstr "" -#: ../../source/ref-changelog.md:73 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:75 ../../source/ref-changelog.md:1058 -msgid "Deprecations" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:77 -msgid "**Deprecate Python 3.8 support**" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:79 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." 
+":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:81 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:83 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:85 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:87 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" msgstr "" -#: ../../source/ref-changelog.md:91 -msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." msgstr "" -#: ../../source/ref-changelog.md:93 -msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." 
msgstr "" -#: ../../source/ref-changelog.md:95 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:97 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:99 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:101 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:103 -msgid "v1.8.0 (2024-04-03)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:113 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:115 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:117 -msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" msgstr "" -#: ../../source/ref-changelog.md:119 -msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. 
The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: ../../source/ref-changelog.md:121 +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:123 +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:125 -msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." msgstr "" -#: ../../source/ref-changelog.md:127 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. 
Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:129 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:131 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:133 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:135 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:137 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:139 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:141 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:143 -msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -#: ../../source/ref-changelog.md:145 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:147 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:149 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-changelog.md:151 -msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." msgstr "" -#: ../../source/ref-changelog.md:153 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:155 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:157 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:159 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:161 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " 
-"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." msgstr "" -#: ../../source/ref-changelog.md:167 -msgid "v1.7.0 (2024-02-05)" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: ../../source/ref-changelog.md:173 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:177 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:179 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:181 -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." msgstr "" -#: ../../source/ref-changelog.md:183 -msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" msgstr "" -#: ../../source/ref-changelog.md:185 -msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -#: ../../source/ref-changelog.md:187 +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." msgstr "" -#: ../../source/ref-changelog.md:189 -msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
msgstr "" -#: ../../source/ref-changelog.md:191 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:193 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:195 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:197 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:199 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:201 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:203 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:205 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:207 -msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: ../../source/ref-changelog.md:209 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:211 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:213 -msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." msgstr "" -#: ../../source/ref-changelog.md:215 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" -msgstr "" - -#: ../../source/ref-changelog.md:217 -msgid "Many Flower code examples received substantial updates." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 -msgid "**Update Flower Baselines**" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:221 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:222 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:223 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:224 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:225 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -#: ../../source/ref-changelog.md:226 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." msgstr "" -#: ../../source/ref-changelog.md:228 -msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." 
msgstr "" -#: ../../source/ref-changelog.md:230 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:232 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." 
+":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:234 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:236 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, 
parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:240 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:242 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:244 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:246 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:248 -msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -#: ../../source/ref-changelog.md:250 -msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -#: ../../source/ref-changelog.md:252 +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -#: ../../source/ref-changelog.md:256 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." msgstr "" -#: ../../source/ref-changelog.md:258 -msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). 
Existing MXNet " -"examples won't receive updates." +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: ../../source/ref-changelog.md:260 -msgid "v1.6.0 (2023-11-28)" +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: ../../source/ref-changelog.md:266 +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -#: ../../source/ref-changelog.md:270 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -#: ../../source/ref-changelog.md:272 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:276 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:280 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:282 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:284 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:286 -msgid "Add gRPC request-response capability to the Android SDK." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:288 -msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -#: ../../source/ref-changelog.md:292 -msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" msgstr "" -#: ../../source/ref-changelog.md:294 -msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" -#: ../../source/ref-changelog.md:296 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:298 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:300 -msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." msgstr "" -#: ../../source/ref-changelog.md:302 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:304 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:306 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:314 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:316 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:318 -msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -#: ../../source/ref-changelog.md:320 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " 
-"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:322 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." msgstr "" -#: ../../source/ref-changelog.md:324 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:326 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." msgstr "" -#: ../../source/ref-changelog.md:328 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:330 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:332 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:334 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:336 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:338 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:340 -msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " 
-"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: ../../source/ref-changelog.md:342 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:344 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:346 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 -msgid "Flower received many improvements under the hood, too many to list here." +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:352 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:354 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:356 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:358 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:360 -msgid "v1.5.0 (2023-08-31)" +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" msgstr "" -#: ../../source/ref-changelog.md:366 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -#: ../../source/ref-changelog.md:370 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:372 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. 
It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:374 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:376 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:378 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:380 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:382 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:384 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:386 -msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: ../../source/ref-changelog.md:388 -msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." msgstr "" -#: ../../source/ref-changelog.md:390 -msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -#: ../../source/ref-changelog.md:392 -msgid "**Deprecate Python 3.7**" +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." msgstr "" -#: ../../source/ref-changelog.md:394 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:396 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:398 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:400 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:402 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:404 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:406 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:408 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" -msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-changelog.md:410 -msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" +msgstr "" + +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -#: ../../source/ref-changelog.md:412 +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
msgstr "" -#: ../../source/ref-changelog.md:414 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:416 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:418 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:420 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:422 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:424 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:426 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:428 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:430 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
+#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: ../../source/ref-changelog.md:432 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:434 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:436 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:450 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:454 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:456 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:458 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:460 -msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: ../../source/ref-changelog.md:462 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:464 -msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." 
msgstr "" -#: ../../source/ref-changelog.md:466 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:468 -msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." msgstr "" -#: ../../source/ref-changelog.md:470 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:472 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:474 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:476 -msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." msgstr "" -#: ../../source/ref-changelog.md:478 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:480 -msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." msgstr "" -#: ../../source/ref-changelog.md:482 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:484 -msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -#: ../../source/ref-changelog.md:486 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: ../../source/ref-changelog.md:488 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:490 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. 
We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-changelog.md:492 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -#: ../../source/ref-changelog.md:494 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-changelog.md:496 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -#: ../../source/ref-changelog.md:498 -msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" 
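The `Strategy` entries above spell out the aggregation contract: `results` holds `(ClientProxy, FitRes)` pairs only for clients that actually replied, dropped clients appear in `failures`, and returning `None` from `aggregate_fit` leaves the previous global parameters unchanged. A rough sketch of that contract in use, assuming a `FedAvg` subclass (the class name `LoggingFedAvg` is purely illustrative):

```python
from typing import Dict, List, Optional, Tuple, Union

from flwr.common import FitRes, Parameters, Scalar
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy import FedAvg


class LoggingFedAvg(FedAvg):
    """Illustrates the aggregate_fit contract described above."""

    def aggregate_fit(
        self,
        server_round: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        # `results` only contains clients that actually submitted an update;
        # clients that dropped out show up in `failures` instead.
        print(f"round {server_round}: {len(results)} results, {len(failures)} failures")
        if not results:
            # Returning None keeps the previous global model parameters unchanged.
            return None, {}
        return super().aggregate_fit(server_round, results, failures)
```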
+#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: ../../source/ref-changelog.md:500 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." msgstr "" -#: ../../source/ref-changelog.md:514 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:518 -msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." msgstr "" -#: ../../source/ref-changelog.md:520 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: ../../source/ref-changelog.md:522 -msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -#: ../../source/ref-changelog.md:524 -msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: ../../source/ref-changelog.md:526 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" +msgstr "" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." 
+#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -#: ../../source/ref-changelog.md:530 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -#: ../../source/ref-changelog.md:532 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -#: ../../source/ref-changelog.md:534 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -#: ../../source/ref-changelog.md:536 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: ../../source/ref-changelog.md:538 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: ../../source/ref-changelog.md:540 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. 
A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -#: ../../source/ref-changelog.md:542 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" -msgstr "" - -#: ../../source/ref-changelog.md:544 -msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-changelog.md:546 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: ../../source/ref-changelog.md:548 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." msgstr "" -#: ../../source/ref-changelog.md:550 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-changelog.md:552 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." 
msgstr "" -#: ../../source/ref-changelog.md:554 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." msgstr "" -#: ../../source/ref-changelog.md:558 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 -msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" 
+#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: ../../source/ref-changelog.md:572 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -#: ../../source/ref-changelog.md:576 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -#: ../../source/ref-changelog.md:578 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -#: ../../source/ref-changelog.md:580 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:582 -msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." 
+#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -#: ../../source/ref-changelog.md:584 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:586 -msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -#: ../../source/ref-changelog.md:588 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:589 -msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." msgstr "" -#: ../../source/ref-changelog.md:590 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:591 -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." 
msgstr "" -#: ../../source/ref-changelog.md:593 -msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" msgstr "" -#: ../../source/ref-changelog.md:595 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: ../../source/ref-changelog.md:597 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-changelog.md:599 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -#: ../../source/ref-changelog.md:601 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." 
msgstr "" -#: ../../source/ref-changelog.md:603 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-changelog.md:605 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -#: ../../source/ref-changelog.md:607 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -#: ../../source/ref-changelog.md:609 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:611 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:613 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:615 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." 
+":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:617 -msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: ../../source/ref-changelog.md:619 +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:621 -msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." msgstr "" -#: ../../source/ref-changelog.md:625 +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-changelog.md:629 -msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:639 +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -#: ../../source/ref-changelog.md:641 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:645 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -#: ../../source/ref-changelog.md:647 -msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." msgstr "" -#: ../../source/ref-changelog.md:649 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: ../../source/ref-changelog.md:651 +#: flwr.simulation.run_simulation.run_simulation:21 of msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -#: ../../source/ref-changelog.md:653 +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." msgstr "" -#: ../../source/ref-changelog.md:655 -msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
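To make the `run_simulation` parameter docstrings added above easier to follow, here is a minimal, hedged sketch of how `server_app`, `client_app`, `num_supernodes`, and `backend_config` fit together. It assumes the Flower 1.9+ Python APIs; the placeholder client, the round count, and the resource values are illustrative only and not part of the original text.

```python
# Minimal sketch of the `run_simulation` parameters described above.
# Assumptions: Flower >= 1.9 APIs; the placeholder client and the resource
# values are illustrative only.
import numpy as np

from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.simulation import run_simulation


class PlaceholderClient(NumPyClient):
    """Trivial client so the sketch is self-contained; a real app would train a model."""

    def get_parameters(self, config):
        return [np.zeros(3)]

    def fit(self, parameters, config):
        # Echo the received parameters back; real training would happen here.
        return parameters, 1, {}

    def evaluate(self, parameters, config):
        return 0.0, 1, {}


def client_fn(context: Context):
    # Executed on each (virtual) SuperNode.
    return PlaceholderClient().to_client()


def server_fn(context: Context) -> ServerAppComponents:
    # The ServerApp drives the rounds and sends messages to the ClientApp instances.
    return ServerAppComponents(config=ServerConfig(num_rounds=3))


run_simulation(
    server_app=ServerApp(server_fn=server_fn),
    client_app=ClientApp(client_fn=client_fn),
    num_supernodes=10,  # number of nodes that run a ClientApp
    backend_config={"client_resources": {"num_cpus": 1, "num_gpus": 0.0}},
)
```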
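Similarly, the `SecAggWorkflow` docstrings added a little earlier in this hunk describe passing `reconstruction_threshold` as a float (interpreted as a proportion of the selected clients). A short, hedged sketch of what that could look like on the server side; it assumes `SecAggWorkflow` and `DefaultWorkflow` are importable from `flwr.server.workflow`, and the 0.6 value is purely an example.

```python
# Hedged sketch only: wiring a SecAgg fit workflow, with a float threshold as
# described above (0.6 = 60% of the selected clients are needed to reconstruct
# a private key; higher means better privacy but less dropout tolerance).
from flwr.server.workflow import DefaultWorkflow, SecAggWorkflow

workflow = DefaultWorkflow(
    fit_workflow=SecAggWorkflow(reconstruction_threshold=0.6),
)
# `workflow(driver, context)` would then be invoked from a ServerApp main function.
```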
+#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:657 -msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: ../../source/ref-changelog.md:659 -msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +#: ../../source/ref-changelog.md:3 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: ../../source/ref-changelog.md:661 -msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" msgstr "" -#: ../../source/ref-changelog.md:663 +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:665 +#: ../../source/ref-changelog.md:9 msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:667 -msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +#: ../../source/ref-changelog.md:11 +msgid "Improvements" msgstr "" -#: ../../source/ref-changelog.md:669 +#: ../../source/ref-changelog.md:13 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: ../../source/ref-changelog.md:671 +#: ../../source/ref-changelog.md:15 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." 
+"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: ../../source/ref-changelog.md:673 +#: ../../source/ref-changelog.md:17 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:19 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: ../../source/ref-changelog.md:677 +#: ../../source/ref-changelog.md:21 msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: ../../source/ref-changelog.md:679 +#: ../../source/ref-changelog.md:23 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: ../../source/ref-changelog.md:681 +#: ../../source/ref-changelog.md:25 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: ../../source/ref-changelog.md:683 +#: ../../source/ref-changelog.md:27 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" 
+"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -#: ../../source/ref-changelog.md:685 +#: ../../source/ref-changelog.md:29 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" +msgstr "" + +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" +msgstr "" + +#: ../../source/ref-changelog.md:35 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: ../../source/ref-changelog.md:687 +#: ../../source/ref-changelog.md:41 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" 
msgstr "" -#: ../../source/ref-changelog.md:689 +#: ../../source/ref-changelog.md:45 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-changelog.md:691 +#: ../../source/ref-changelog.md:47 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" +#: ../../source/ref-changelog.md:49 +msgid "" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-changelog.md:699 -msgid "Highlights" +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." 
msgstr "" -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/ref-changelog.md:53 +msgid "" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/ref-changelog.md:55 +msgid "" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:59 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:60 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." 
msgstr "" -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:61 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:63 msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:65 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." 
+"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." +msgstr "" + +#: ../../source/ref-changelog.md:68 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:69 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:70 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:72 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: ../../source/ref-changelog.md:74 +msgid "" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: ../../source/ref-changelog.md:76 +msgid "" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` --> `evaluate_fn`" +#: ../../source/ref-changelog.md:78 +msgid "" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." 
msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:80 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:82 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/ref-changelog.md:84 +msgid "" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -#: ../../source/ref-changelog.md:736 +#: ../../source/ref-changelog.md:86 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" -#: ../../source/ref-changelog.md:738 +#: ../../source/ref-changelog.md:88 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" +msgstr "" + +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
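Since the entry above introduces `RecordSet` as the core object exchanged between `ClientApp` and `ServerApp`, a brief hedged sketch of the related `*Record` types may help. The record keys and values below are made up for illustration, and the attribute names assume the Flower 1.11-era API.

```python
# Hedged sketch of RecordSet and the related *Record types (Flower 1.11-era
# names assumed); the record keys and values are illustrative only.
from flwr.common import ConfigsRecord, MetricsRecord, RecordSet

rs = RecordSet()
rs.configs_records["train-config"] = ConfigsRecord({"lr": 0.01, "optimizer": "sgd"})
rs.metrics_records["eval-metrics"] = MetricsRecord({"accuracy": 0.91, "loss": 0.35})

# The TypedDict-backed records expose dict-like access (see the v1.11.1 note above).
print(list(rs.configs_records["train-config"].keys()))  # -> ["lr", "optimizer"]
```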
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." +msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " 
+"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" msgstr "" -#: ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:102 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -#: ../../source/ref-changelog.md:742 +#: ../../source/ref-changelog.md:104 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:113 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:115 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:117 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: ../../source/ref-changelog.md:750 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
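The `Client.context` deprecation note above (ref-changelog.md:104) says to pass the `Context` manually when creating the `Client` instance in `client_fn`. A minimal sketch of that pattern follows; the attribute name is chosen here purely for illustration.

```python
# Sketch of the pattern described in the Client.context deprecation note above:
# hand the Context to the client yourself instead of reading Client.context.
from flwr.client import ClientApp, NumPyClient
from flwr.common import Context


class FlowerClient(NumPyClient):
    def __init__(self, context: Context) -> None:
        super().__init__()
        self.client_context = context  # keep your own reference to the Context


def client_fn(context: Context):
    return FlowerClient(context).to_client()


app = ClientApp(client_fn=client_fn)
```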
msgstr "" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:121 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:123 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -#: ../../source/ref-changelog.md:756 -msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:135 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:137 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:762 -msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +#: ../../source/ref-changelog.md:139 +msgid "v1.10.0 (2024-07-24)" msgstr "" -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:145 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:149 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:151 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"Flower 1.10 ships the first beta release of the new `flwr run` command. 
" +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:153 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:155 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:157 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: ../../source/ref-changelog.md:778 +#: ../../source/ref-changelog.md:159 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. 
It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:161 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:163 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:165 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:167 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:169 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." 
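Tying together the run-config and `client_fn(context: Context)` entries above, here is a hedged sketch of a configurable `ClientApp` that reads a run-config value at runtime. The `learning-rate` key is assumed to be declared under `[tool.flwr.app.config]` in `pyproject.toml` and overridable via `flwr run --run-config learning-rate=0.02`, as described above.

```python
# Hedged sketch: a configurable ClientApp using the new client_fn(context) signature.
# Assumes a "learning-rate" key under [tool.flwr.app.config] in pyproject.toml.
from flwr.client import ClientApp, NumPyClient
from flwr.common import Context


class FlowerClient(NumPyClient):
    def __init__(self, lr: float) -> None:
        super().__init__()
        self.lr = lr  # a real client would use this to configure its optimizer


def client_fn(context: Context):
    lr = context.run_config["learning-rate"]  # read at runtime, no code change needed
    return FlowerClient(lr=lr).to_client()


app = ClientApp(client_fn=client_fn)
```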
msgstr "" -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" +#: ../../source/ref-changelog.md:171 +msgid "" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: ../../source/ref-changelog.md:791 -msgid "`simulation_pytorch`" +#: ../../source/ref-changelog.md:173 +msgid "" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" +#: ../../source/ref-changelog.md:175 +msgid "" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" +#: ../../source/ref-changelog.md:177 +msgid "" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." msgstr "" -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" +#: ../../source/ref-changelog.md:179 +msgid "" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" +#: ../../source/ref-changelog.md:181 +msgid "" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -#: ../../source/ref-changelog.md:797 +#: ../../source/ref-changelog.md:183 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: ../../source/ref-changelog.md:799 +#: ../../source/ref-changelog.md:185 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: ../../source/ref-changelog.md:801 +#: ../../source/ref-changelog.md:187 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:189 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" +#: ../../source/ref-changelog.md:191 +msgid "" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:193 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. 
It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:195 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:197 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:199 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" +#: ../../source/ref-changelog.md:201 +msgid "" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:203 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: ../../source/ref-changelog.md:818 -msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +#: ../../source/ref-changelog.md:207 +msgid "Documentation improvements" msgstr "" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:209 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:211 msgid "" -"Preview support for Flower clients written in C++. 
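The deprecation note directly above explains that `context.node_config["partition-id"]` returns an `int` that can be used with Flower Datasets. A hedged sketch of that pattern follows; the dataset name, the partition count, and the helper function name are illustrative assumptions, and the separate `flwr-datasets` package is required.

```python
# Hedged sketch: use the partition ID from the node config to load one
# partition with Flower Datasets. Dataset name and partition count are
# illustrative; requires the `flwr-datasets` package.
from flwr.common import Context
from flwr_datasets import FederatedDataset


def load_partition_for_node(context: Context):
    partition_id = context.node_config["partition-id"]  # int, per the note above
    fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10})
    return fds.load_partition(partition_id, "train")
```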
The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:213 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:215 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -#: ../../source/ref-changelog.md:828 -msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:221 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." 
msgstr "" -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:223 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: ../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:225 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:229 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:231 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:233 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:235 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
msgstr "" -#: ../../source/ref-changelog.md:844 -msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +#: ../../source/ref-changelog.md:237 +msgid "v1.9.0 (2024-06-10)" msgstr "" -#: ../../source/ref-changelog.md:846 +#: ../../source/ref-changelog.md:243 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: ../../source/ref-changelog.md:848 +#: ../../source/ref-changelog.md:247 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: ../../source/ref-changelog.md:850 +#: ../../source/ref-changelog.md:249 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: ../../source/ref-changelog.md:852 +#: ../../source/ref-changelog.md:251 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -#: ../../source/ref-changelog.md:854 +#: ../../source/ref-changelog.md:253 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." 
+"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: ../../source/ref-changelog.md:856 +#: ../../source/ref-changelog.md:255 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -#: ../../source/ref-changelog.md:858 +#: ../../source/ref-changelog.md:257 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: ../../source/ref-changelog.md:862 +#: ../../source/ref-changelog.md:259 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." 
msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:261 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -#: ../../source/ref-changelog.md:864 +#: ../../source/ref-changelog.md:263 msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:265 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:267 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: ../../source/ref-changelog.md:870 +#: ../../source/ref-changelog.md:269 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -#: ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:271 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." 
msgstr "" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:273 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -#: ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:275 msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:277 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:279 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." msgstr "" -#: ../../source/ref-changelog.md:876 +#: ../../source/ref-changelog.md:281 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:283 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." 
msgstr "" -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" -msgstr "" - -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:285 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:287 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:289 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:291 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." 
msgstr "" -#: ../../source/ref-changelog.md:891 +#: ../../source/ref-changelog.md:293 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -#: ../../source/ref-changelog.md:893 +#: ../../source/ref-changelog.md:295 msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -#: ../../source/ref-changelog.md:895 +#: ../../source/ref-changelog.md:297 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: ../../source/ref-changelog.md:897 +#: ../../source/ref-changelog.md:299 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -#: ../../source/ref-changelog.md:899 +#: ../../source/ref-changelog.md:301 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." 
+"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -#: ../../source/ref-changelog.md:901 +#: ../../source/ref-changelog.md:303 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -#: ../../source/ref-changelog.md:903 -msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." 
msgstr "" -#: ../../source/ref-changelog.md:905 +#: ../../source/ref-changelog.md:307 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -#: ../../source/ref-changelog.md:907 -msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +#: ../../source/ref-changelog.md:311 +msgid "**Deprecate Python 3.8 support**" msgstr "" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:313 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:911 +#: ../../source/ref-changelog.md:315 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." 
+"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -#: ../../source/ref-changelog.md:913 +#: ../../source/ref-changelog.md:317 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -#: ../../source/ref-changelog.md:915 +#: ../../source/ref-changelog.md:319 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:321 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:325 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -#: ../../source/ref-changelog.md:921 +#: ../../source/ref-changelog.md:327 msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -#: ../../source/ref-changelog.md:923 +#: ../../source/ref-changelog.md:329 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." 
+"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:331 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -#: ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:333 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:335 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -#: ../../source/ref-changelog.md:932 -msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +#: ../../source/ref-changelog.md:337 +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:343 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -#: ../../source/ref-changelog.md:934 +#: ../../source/ref-changelog.md:347 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:349 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -#: ../../source/ref-changelog.md:936 +#: ../../source/ref-changelog.md:351 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -#: ../../source/ref-changelog.md:937 +#: ../../source/ref-changelog.md:353 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" 
+"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -#: ../../source/ref-changelog.md:941 +#: ../../source/ref-changelog.md:355 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:357 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." -msgstr "" - -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." msgstr "" -#: ../../source/ref-changelog.md:949 +#: ../../source/ref-changelog.md:359 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -#: ../../source/ref-changelog.md:951 +#: ../../source/ref-changelog.md:361 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. 
Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -#: ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:363 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -#: ../../source/ref-changelog.md:955 +#: ../../source/ref-changelog.md:365 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -#: ../../source/ref-changelog.md:957 +#: ../../source/ref-changelog.md:367 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:369 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. 
We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -#: ../../source/ref-changelog.md:960 +#: ../../source/ref-changelog.md:371 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -#: ../../source/ref-changelog.md:962 +#: ../../source/ref-changelog.md:373 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." msgstr "" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:375 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:377 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:379 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: ../../source/ref-changelog.md:971 +#: ../../source/ref-changelog.md:381 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." 
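An illustrative invocation using the reconnection flags described above; only `--max-retries` and `--max-wait-time` come from the changelog, the address, the `client:app` reference and the numeric values are examples.

```shell
# --max-retries: reconnection attempts before the SuperNode gives up
# --max-wait-time: seconds before the SuperNode stops trying to reconnect
flower-client-app client:app --superlink 127.0.0.1:9092 --insecure \
    --max-retries 5 --max-wait-time 120
```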
msgstr "" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:383 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: ../../source/ref-changelog.md:973 +#: ../../source/ref-changelog.md:385 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:387 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:389 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:391 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." 
+"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:393 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." msgstr "" -#: ../../source/ref-changelog.md:984 +#: ../../source/ref-changelog.md:395 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " 
+"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:401 +msgid "v1.7.0 (2024-02-05)" +msgstr "" + +#: ../../source/ref-changelog.md:407 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:411 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." 
+"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:413 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:415 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" +#: ../../source/ref-changelog.md:417 +msgid "" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:419 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:421 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -#: ../../source/ref-changelog.md:1003 +#: ../../source/ref-changelog.md:423 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." 
+"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:425 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -#: ../../source/ref-changelog.md:1007 +#: ../../source/ref-changelog.md:427 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: ../../source/ref-changelog.md:1009 +#: ../../source/ref-changelog.md:429 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -#: ../../source/ref-changelog.md:1011 +#: ../../source/ref-changelog.md:431 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: ../../source/ref-changelog.md:1013 +#: ../../source/ref-changelog.md:433 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
msgstr "" -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" +#: ../../source/ref-changelog.md:435 +msgid "" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: ../../source/ref-changelog.md:1017 +#: ../../source/ref-changelog.md:437 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -#: ../../source/ref-changelog.md:1021 +#: ../../source/ref-changelog.md:439 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: ../../source/ref-changelog.md:1023 +#: ../../source/ref-changelog.md:441 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: ../../source/ref-changelog.md:1025 +#: ../../source/ref-changelog.md:443 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." 
+"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: ../../source/ref-changelog.md:1027 +#: ../../source/ref-changelog.md:445 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/ref-changelog.md:447 +msgid "" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:449 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: ../../source/ref-changelog.md:1035 -msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." msgstr "" -#: ../../source/ref-changelog.md:1037 -msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" msgstr "" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:455 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." 
+"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: ../../source/ref-changelog.md:1060 -msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +#: ../../source/ref-changelog.md:456 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:457 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: ../../source/ref-changelog.md:1066 -msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +#: ../../source/ref-changelog.md:458 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: ../../source/ref-changelog.md:1068 -msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +#: ../../source/ref-changelog.md:459 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: ../../source/ref-changelog.md:1070 -msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +#: ../../source/ref-changelog.md:460 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:462 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:464 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/ref-changelog.md:1089 +#: ../../source/ref-changelog.md:466 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/ref-changelog.md:1091 +#: ../../source/ref-changelog.md:468 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." 
+"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: ../../source/ref-changelog.md:1093 +#: ../../source/ref-changelog.md:470 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: ../../source/ref-changelog.md:1095 +#: ../../source/ref-changelog.md:474 msgid "" -"*Code example:* note that the `config` dictionary now contains 
non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/ref-changelog.md:476 +msgid "" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: ../../source/ref-changelog.md:1116 +#: ../../source/ref-changelog.md:478 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" +#: ../../source/ref-changelog.md:480 +msgid "" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." msgstr "" -#: ../../source/ref-changelog.md:1118 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:482 +msgid "" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:484 +msgid "" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/ref-changelog.md:1120 +#: ../../source/ref-changelog.md:486 msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:488 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" +#: ../../source/ref-changelog.md:490 +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: ../../source/ref-changelog.md:1125 +#: ../../source/ref-changelog.md:492 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." 
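A minimal migration sketch for the `start_numpy_client` deprecation described above; the client class and server address are placeholders, and the `NumPyClient` methods are omitted for brevity.

```python
import flwr as fl

class FlowerClient(fl.client.NumPyClient):
    """Placeholder client; implement get_parameters/fit/evaluate as usual."""

# Deprecated:
# fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=FlowerClient())

# Recommended: convert the NumPyClient to a Client and use start_client
fl.client.start_client(
    server_address="127.0.0.1:8080",
    client=FlowerClient().to_client(),
)
```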
msgstr "" -#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/ref-changelog.md:494 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" +#: ../../source/ref-changelog.md:500 +msgid "" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:504 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: ../../source/ref-changelog.md:1132 +#: ../../source/ref-changelog.md:506 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:508 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/ref-changelog.md:510 +msgid "" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" +#: ../../source/ref-changelog.md:512 +msgid "" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:514 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. 
To " -"migrate rename the following `Strategy` methods accordingly:" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/ref-changelog.md:516 +msgid "" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/ref-changelog.md:518 +msgid "" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:522 +msgid "" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/ref-changelog.md:1147 -msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: ../../source/ref-changelog.md:1148 +#: ../../source/ref-changelog.md:526 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/ref-changelog.md:1149 +#: ../../source/ref-changelog.md:528 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: ../../source/ref-changelog.md:1150 +#: ../../source/ref-changelog.md:530 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:532 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." 
+"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/ref-changelog.md:534 +msgid "" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:536 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-changelog.md:538 msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:540 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: ../../source/ref-example-projects.rst:18 -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/ref-changelog.md:548 +msgid "" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-changelog.md:550 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +#: ../../source/ref-changelog.md:552 +msgid "" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:554 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/ref-example-projects.rst:28 -msgid "" -"`Quickstart PyTorch (Code) " -"`_" +#: ../../source/ref-changelog.md:556 +msgid "FedMeta 
[#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/ref-example-projects.rst:29 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/ref-example-projects.rst:35 -msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/ref-example-projects.rst:37 -msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: ../../source/ref-example-projects.rst:38 -msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:570 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:572 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:574 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:576 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." 
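A sketch of the unified client API described in the "Unify client API" entry above: the same client class, wrapped by a `client_fn` that returns a `Client` via `to_client()`, can back a simulation or run as a standalone process. Client count, resources, and addresses are placeholders.

```python
import flwr as fl

class FlowerClient(fl.client.NumPyClient):
    """Placeholder client; implement get_parameters/fit/evaluate as usual."""

def client_fn(cid: str) -> fl.client.Client:
    # The client id can be used to pick a per-client data partition.
    return FlowerClient().to_client()

# Simulation: the virtual client engine creates clients on demand.
fl.simulation.start_simulation(
    client_fn=client_fn,
    num_clients=10,
    config=fl.server.ServerConfig(num_rounds=3),
    client_resources={"num_cpus": 1, "num_gpus": 0.0},
)

# Deployment: the very same client class runs as a standalone process.
# fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient().to_client())
```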
+"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:578 +msgid "" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:580 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." 
+msgstr "" + +#: ../../source/ref-changelog.md:586 msgid "" -"`Flower simulation PyTorch " -"`_" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:588 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/ref-changelog.md:590 +msgid "" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:592 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:600 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:604 msgid "" -"`Android Kotlin example `_" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:606 +msgid "" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:608 +msgid "" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
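Relating to the removal of the experimental `rest` argument from `start_client` noted in the v1.6.0 section above, here is a minimal migration sketch; the server address and client class are placeholders.

```python
import flwr as fl

class FlowerClient(fl.client.NumPyClient):
    """Placeholder client."""

# Before (no longer supported):
# fl.client.start_client(server_address="127.0.0.1:9093",
#                        client=FlowerClient().to_client(), rest=True)

# After: select the experimental REST transport explicitly.
fl.client.start_client(
    server_address="127.0.0.1:9093",
    client=FlowerClient().to_client(),
    transport="rest",
)
```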
msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:610 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:612 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:614 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:616 msgid "" -"`Flower meets KOSMoS `_." +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:618 msgid "" -"`Flower meets Talan blog post `_ ." +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:620 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
msgstr "" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +#: ../../source/ref-changelog.md:622 +msgid "" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:624 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/ref-changelog.md:628 +msgid "" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:630 +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:632 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:634 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." 
+"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:636 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:638 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "" - -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:640 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:642 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." -msgstr "" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:644 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:646 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." 
msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:648 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:650 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:652 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:654 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:656 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:658 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:660 msgid "" -"We will not collect any personally identifiable information. 
If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:662 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." -msgstr "" - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/ref-telemetry.md:52 -msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" msgstr "" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:666 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:668 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:670 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." 
+"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:684 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:688 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:690 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "" - -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "" - -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:692 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:694 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. 
To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" +#: ../../source/ref-changelog.md:696 +msgid "" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:698 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:700 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +#: ../../source/ref-changelog.md:702 +msgid "" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:704 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +#: ../../source/ref-changelog.md:706 +msgid "" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/ref-changelog.md:708 +msgid "" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:710 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/ref-changelog.md:712 +msgid "" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:714 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/ref-changelog.md:716 +msgid "" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:718 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" +#: ../../source/ref-changelog.md:720 +msgid "" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 
🎉" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/ref-changelog.md:722 +msgid "" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:724 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:726 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/ref-changelog.md:728 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:730 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:732 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/ref-changelog.md:734 +msgid "" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/ref-changelog.md:748 msgid "" -"And they will be able to connect to the server and start the federated " -"training." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/ref-changelog.md:752 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/ref-changelog.md:754 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:756 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:758 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:760 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 -msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:764 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/ref-changelog.md:766 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." 
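The server-side setup sketched in the quickstart text above (a `FedAvg` strategy plus a `weighted_average` helper that aggregates client-reported metrics) could look roughly like this; the address, round count, and the assumption that clients report an `accuracy` metric are illustrative:

    import flwr as fl


    def weighted_average(metrics):
        # `metrics` is a list of (num_examples, metrics_dict) tuples, one per client.
        accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics]
        examples = [num_examples for num_examples, _ in metrics]
        return {"accuracy": sum(accuracies) / sum(examples)}


    strategy = fl.server.strategy.FedAvg(
        fit_metrics_aggregation_fn=weighted_average,
        evaluate_metrics_aggregation_fn=weighted_average,
    )

    fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=strategy,
    )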
+"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-changelog.md:768 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You" -" can do this by using pip:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +#: ../../source/ref-changelog.md:770 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" +#: ../../source/ref-changelog.md:772 +msgid "" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-changelog.md:774 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/ref-changelog.md:776 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/ref-changelog.md:778 +msgid "" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-changelog.md:780 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-changelog.md:782 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-changelog.md:784 msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:786 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:788 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." 
+"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" +#: ../../source/ref-changelog.md:792 +msgid "" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" 
+#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:806 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:810 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:812 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:814 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" +#: ../../source/ref-changelog.md:816 +msgid "" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:818 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" +#: ../../source/ref-changelog.md:820 +msgid "" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! 
Check out the updated notebooks here:" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" +#: ../../source/ref-changelog.md:822 +msgid "" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:823 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:824 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:825 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/ref-changelog.md:827 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:829 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:831 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/ref-changelog.md:833 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. 
You can do this by running :" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:835 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:837 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:839 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" +#: ../../source/ref-changelog.md:841 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/ref-changelog.md:843 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/ref-changelog.md:845 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/ref-changelog.md:847 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." 
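The CIFAR-10 data loading, training, and test loops described in the quickstart text above can be condensed into a sketch like the following; the batch size, optimizer settings, and `./data` path are assumptions:

    import torch
    import torchvision
    import torchvision.transforms as transforms

    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


    def load_data():
        # Download CIFAR-10 and normalize it, returning train/test DataLoaders.
        transform = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        )
        trainset = torchvision.datasets.CIFAR10("./data", train=True, download=True, transform=transform)
        testset = torchvision.datasets.CIFAR10("./data", train=False, download=True, transform=transform)
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True)
        testloader = torch.utils.data.DataLoader(testset, batch_size=32)
        return trainloader, testloader


    def train(net, trainloader, epochs):
        # Loop over the training set, measure the loss, and optimize it.
        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
        net.train()
        for _ in range(epochs):
            for images, labels in trainloader:
                images, labels = images.to(DEVICE), labels.to(DEVICE)
                optimizer.zero_grad()
                criterion(net(images), labels).backward()
                optimizer.step()


    def test(net, testloader):
        # Loop over the test set and measure loss and accuracy.
        criterion = torch.nn.CrossEntropyLoss()
        correct, loss = 0, 0.0
        net.eval()
        with torch.no_grad():
            for images, labels in testloader:
                images, labels = images.to(DEVICE), labels.to(DEVICE)
                outputs = net(images)
                loss += criterion(outputs, labels).item()
                correct += (outputs.argmax(dim=1) == labels).sum().item()
        accuracy = correct / len(testloader.dataset)
        return loss / len(testloader), accuracy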
+"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/ref-changelog.md:849 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/ref-changelog.md:851 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/ref-changelog.md:853 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-changelog.md:855 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:859 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. 
Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" +#: ../../source/ref-changelog.md:863 +msgid "" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:873 msgid "" -"update the local model weights with the parameters received from the " -"server" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" +#: ../../source/ref-changelog.md:875 +msgid "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" +#: ../../source/ref-changelog.md:879 +msgid "" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" +#: ../../source/ref-changelog.md:881 +msgid "" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." 
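Putting the methods listed above together, a sketch of such a client could look like this; it assumes the `train` and `test` helpers from the earlier CIFAR-10 sketch (or your own equivalents) and borrows the `CifarClient` name from the quickstart text:

    from collections import OrderedDict

    import flwr as fl
    import torch


    class CifarClient(fl.client.NumPyClient):
        def __init__(self, net, trainloader, testloader):
            self.net = net
            self.trainloader = trainloader
            self.testloader = testloader

        def get_parameters(self, config):
            # Return the model weights as a list of NumPy ndarrays.
            return [val.cpu().numpy() for _, val in self.net.state_dict().items()]

        def set_parameters(self, parameters):
            # Update the local model weights with the parameters received from the server.
            params_dict = zip(self.net.state_dict().keys(), parameters)
            state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
            self.net.load_state_dict(state_dict, strict=True)

        def fit(self, parameters, config):
            # Set weights, train the local model, and return the updated weights.
            self.set_parameters(parameters)
            train(self.net, self.trainloader, epochs=1)
            return self.get_parameters(config={}), len(self.trainloader.dataset), {}

        def evaluate(self, parameters, config):
            # Set weights and test the local model.
            self.set_parameters(parameters)
            loss, accuracy = test(self.net, self.testloader)
            return float(loss), len(self.testloader.dataset), {"accuracy": float(accuracy)}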
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:883 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/ref-changelog.md:885 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:887 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" +#: ../../source/ref-changelog.md:889 +msgid "" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:891 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/ref-changelog.md:893 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:895 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/ref-changelog.md:897 +msgid "" +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/ref-changelog.md:899 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/ref-changelog.md:901 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/ref-changelog.md:903 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" +#: ../../source/ref-changelog.md:905 +msgid "" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:907 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/ref-changelog.md:909 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:911 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." 
+"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:913 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" +#: ../../source/ref-changelog.md:915 +msgid "" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/ref-changelog.md:917 +msgid "" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/ref-changelog.md:919 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/ref-changelog.md:921 +msgid "" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:923 +msgid "" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/ref-changelog.md:925 +msgid "" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/ref-changelog.md:933 +msgid "Highlights" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 -msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 -msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:938 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:942 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." 
+"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:944 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" +#: ../../source/ref-changelog.md:948 +msgid "" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/ref-changelog.md:950 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/ref-changelog.md:952 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:954 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/ref-changelog.md:956 +msgid "" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/ref-changelog.md:958 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 -msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 -msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. 
We, therefore, have to start the server " -"first:" +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "" + +#: ../../source/ref-changelog.md:964 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/ref-changelog.md:966 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" +#: ../../source/ref-changelog.md:970 +msgid "" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" +#: ../../source/ref-changelog.md:972 +msgid "" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/ref-changelog.md:974 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/ref-changelog.md:976 +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/ref-changelog.md:978 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/ref-changelog.md:980 msgid "" -"Next, we need a model. 
For the purpose of this tutorial, we use " -"MobileNetV2 with 10 output classes:" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve readability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +#: ../../source/ref-changelog.md:982 msgid "" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/ref-changelog.md:986 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/ref-changelog.md:988 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" +#: ../../source/ref-changelog.md:992 +msgid "" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" +#: ../../source/ref-changelog.md:994 +msgid "" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/ref-changelog.md:996 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact; in this release it is finally gone for good."
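To make the renamed parameters concrete, here is a hedged sketch of a server-side `evaluate_fn` with the three-parameter signature (`server_round`, `parameters`, `config`) described in the changelog above; the dummy loss/accuracy values are placeholders.

```python
from typing import Dict, List, Optional, Tuple

import numpy as np
import flwr as fl


def evaluate_fn(
    server_round: int,
    parameters: List[np.ndarray],
    config: Dict[str, fl.common.Scalar],
) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]:
    # A real implementation would load `parameters` into a model and score it
    # on a centralized test set; dummy values are returned here.
    loss, accuracy = 0.0, 0.0
    return loss, {"accuracy": accuracy}


strategy = fl.server.strategy.FedAvg(evaluate_fn=evaluate_fn)
```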
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:998 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" +#: ../../source/ref-changelog.md:1000 +msgid "" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/ref-changelog.md:1002 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:1004 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:1008 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/ref-changelog.md:1010 +msgid "" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/ref-changelog.md:1012 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/ref-changelog.md:1014 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. 
No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:1016 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/ref-changelog.md:1018 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/ref-changelog.md:1020 +msgid "" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:1022 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 -msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 -msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 -msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." 
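Because all `Client`/`NumPyClient` methods became optional (see the entry above), a client can implement only `fit`. The sketch below is illustrative and skips any real training.

```python
import flwr as fl


class FitOnlyClient(fl.client.NumPyClient):
    # `get_properties`, `get_parameters`, and `evaluate` are omitted on purpose:
    # with centralized evaluation on the server, only `fit` is needed.
    def fit(self, parameters, config):
        updated_parameters = parameters  # local training would happen here
        num_examples = 1
        return updated_parameters, num_examples, {}
```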
+#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:1031 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:1033 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:1035 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:1037 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:251 -msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. 
From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:1041 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:1042 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/ref-changelog.md:1043 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/ref-changelog.md:1044 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:311 -msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/ref-changelog.md:1050 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +#: ../../source/ref-changelog.md:1052 +msgid "" +"The first preview release of Flower Baselines has arrived! 
We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:1054 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" +#: ../../source/ref-changelog.md:1056 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" +#: ../../source/ref-changelog.md:1058 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:1060 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:1062 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:1064 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:1066 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." 
+"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:1068 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +#: ../../source/ref-changelog.md:1070 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:1072 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-changelog.md:1074 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +#: ../../source/ref-changelog.md:1076 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/ref-changelog.md:1078 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" +#: ../../source/ref-changelog.md:1080 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/ref-changelog.md:1082 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. 
Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/ref-changelog.md:1084 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/ref-changelog.md:1086 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/ref-changelog.md:1088 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/ref-changelog.md:1090 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +#: ../../source/ref-changelog.md:1092 +msgid "" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/ref-changelog.md:1096 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." 
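A hedged sketch of the `aggregate_fit` override described above for cyclic training: the single participating client's model is simply adopted as the new global model. The class name is illustrative, not the actual `FedXgbCyclic` implementation.

```python
import flwr as fl


class CyclicSketch(fl.server.strategy.FedAvg):
    def aggregate_fit(self, server_round, results, failures):
        if not results:
            return None, {}
        # Only one client trains per round; copy its parameters as the global model.
        _, fit_res = results[0]
        return fit_res.parameters, {}
```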
+"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" +#: ../../source/ref-changelog.md:1097 +msgid "" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:792 +#: ../../source/ref-changelog.md:1098 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:1099 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:827 +#: ../../source/ref-changelog.md:1100 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" +#: ../../source/ref-changelog.md:1104 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:832 +#: ../../source/ref-changelog.md:1105 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:866 +#: ../../source/ref-changelog.md:1106 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:921 +#: ../../source/ref-changelog.md:1107 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." 
+"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/ref-changelog.md:1108 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:975 +#: ../../source/ref-changelog.md:1109 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:995 +#: ../../source/ref-changelog.md:1110 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" +#: ../../source/ref-changelog.md:1111 +msgid "" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#: ../../source/ref-changelog.md:1117 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" +#: ../../source/ref-changelog.md:1119 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/ref-changelog.md:1121 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." 
+"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" +#: ../../source/ref-changelog.md:1123 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/ref-changelog.md:1125 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" +#: ../../source/ref-changelog.md:1127 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#: ../../source/ref-changelog.md:1129 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" +#: ../../source/ref-changelog.md:1131 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/ref-changelog.md:1133 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 +#: ../../source/ref-changelog.md:1135 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" +#: ../../source/ref-changelog.md:1137 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1139 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." 
+"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1141 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:1143 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" +#: ../../source/ref-changelog.md:1145 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" +#: ../../source/ref-changelog.md:1147 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1149 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" +#: ../../source/ref-changelog.md:1151 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" +#: ../../source/ref-changelog.md:1153 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:1155 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:1157 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" +#: ../../source/ref-changelog.md:1159 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:1161 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" +#: ../../source/ref-changelog.md:1165 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:1166 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" +#: ../../source/ref-changelog.md:1167 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:1168 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. 
Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" +#: ../../source/ref-changelog.md:1169 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" +#: ../../source/ref-changelog.md:1170 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1171 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:1175 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" +#: ../../source/ref-changelog.md:1177 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 -msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." 
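A hedged sketch of the `configure_fit` override described above: FedAvg's sampling is kept, and half of the selected clients receive a higher learning rate through the `FitIns` config dictionary (the `"lr"` key and the specific values are illustrative).

```python
import flwr as fl


class FedCustom(fl.server.strategy.FedAvg):
    def configure_fit(self, server_round, parameters, client_manager):
        # Reuse FedAvg's client sampling and FitIns construction
        client_instructions = super().configure_fit(
            server_round, parameters, client_manager
        )
        half = len(client_instructions) // 2
        for idx, (_, fit_ins) in enumerate(client_instructions):
            # FitIns.config is the dictionary each client receives in `fit`
            fit_ins.config["lr"] = 0.01 if idx < half else 0.001
        return client_instructions
```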
+#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:1183 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:1185 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/ref-changelog.md:1187 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" +#: ../../source/ref-changelog.md:1189 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1191 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:1192 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. 
In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:1194 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:1198 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:1204 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:1205 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:1206 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1207 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. 
Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:1208 msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/ref-changelog.md:1212 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +#: ../../source/ref-changelog.md:1214 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:1216 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/ref-changelog.md:1218 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:1220 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1222 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. 
For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 -msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:1228 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:1233 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:1235 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." 
+"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:1237 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:1239 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" +#: ../../source/ref-changelog.md:1241 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/ref-changelog.md:1243 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 +#: ../../source/ref-changelog.md:1245 msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" +#: ../../source/ref-changelog.md:1247 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "" + +#: ../../source/ref-changelog.md:1251 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:1255 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:1257 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:1259 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." -msgstr "" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:1261 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." 
+"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:1267 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" +#: ../../source/ref-changelog.md:1269 +msgid "" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" +#: ../../source/ref-changelog.md:1271 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:1290 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +#: ../../source/ref-changelog.md:1294 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. 
There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:1300 msgid "" -"`Check out Flower Code Examples " -"`__" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/ref-changelog.md:1302 msgid "" -"`Use Flower Baselines for your research " -"`__" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +#: ../../source/ref-changelog.md:1304 msgid "" -"`Watch Flower Summit 2023 videos `__" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/ref-changelog.md:1306 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" +#: ../../source/ref-changelog.md:1308 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1323 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" +#: ../../source/ref-changelog.md:1325 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. 
The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1327 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +#: ../../source/ref-changelog.md:1329 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "" + +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +#: ../../source/ref-changelog.md:1350 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 -msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." 
+#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +#: ../../source/ref-changelog.md:1354 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 +#: ../../source/ref-changelog.md:1355 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/ref-changelog.md:1359 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 -msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:1365 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." 
+"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" +#: ../../source/ref-changelog.md:1366 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:1367 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:1373 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 -msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 -msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." 
+#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:1381 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:1382 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:1383 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" -msgstr "" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:1384 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:1385 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-example-projects.rst:4 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/ref-example-projects.rst:10 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/ref-example-projects.rst:14 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-example-projects.rst:17 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." -msgstr "" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 -msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." 
+#: ../../source/ref-example-projects.rst:18 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/ref-example-projects.rst:19 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" +#: ../../source/ref-example-projects.rst:23 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-example-projects.rst:25 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-example-projects.rst:28 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 -msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +#: ../../source/ref-example-projects.rst:29 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" +#: ../../source/ref-example-projects.rst:33 +msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" 
+#: ../../source/ref-example-projects.rst:35 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-example-projects.rst:37 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/ref-example-projects.rst:38 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" +#: ../../source/ref-example-projects.rst:42 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-example-projects.rst:44 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-example-projects.rst:46 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/ref-example-projects.rst:47 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/ref-faq.rst:4 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 -msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/ref-faq.rst:8 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-faq.rst:10 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." -msgstr "" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-faq.rst:11 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 -msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/ref-faq.rst:15 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/ref-faq.rst:19 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"Yes, it does. 
Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/ref-faq.rst:21 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"`Android Kotlin example `_" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-faq.rst:26 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 -msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +#: ../../source/ref-faq.rst:29 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 -msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +#: ../../source/ref-faq.rst:30 +msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" +#: ../../source/ref-faq.rst:31 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/ref-faq.rst:32 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/ref-faq.rst:33 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" +"`Flower meets KOSMoS `_." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" +#: ../../source/ref-faq.rst:34 +msgid "" +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-faq.rst:35 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 -msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-telemetry.md:3 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-telemetry.md:5 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 -msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. 
One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/ref-telemetry.md:11 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/ref-telemetry.md:12 msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-telemetry.md:13 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-telemetry.md:14 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/ref-telemetry.md:18 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-telemetry.md:24 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 -msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/ref-telemetry.md:30 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +#: ../../source/ref-telemetry.md:32 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" 
+#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#: ../../source/ref-telemetry.md:36 msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-telemetry.md:38 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/ref-telemetry.md:40 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/ref-telemetry.md:44 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/ref-telemetry.md:46 msgid "" -"In machine learning, we have a model, and we have data. 
The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-telemetry.md:52 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#: ../../source/ref-telemetry.md:66 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." 
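The telemetry entries above describe two environment variables: `FLWR_TELEMETRY_ENABLED` to disable reporting and `FLWR_TELEMETRY_LOGGING` to print what would be reported. Below is a minimal sketch of inspecting telemetry locally without sending any metrics. The docs show the variables prepended to a shell command; setting them from Python before importing `flwr` is an assumption that they are read from the process environment when Flower starts.

```python
# Minimal sketch: inspect Flower telemetry locally without sending any metrics.
# Assumption: both variables are read from the process environment when flwr starts,
# so setting them before the import has the same effect as prepending them on the CLI.
import os

os.environ["FLWR_TELEMETRY_ENABLED"] = "0"  # opt out of sending usage metrics
os.environ["FLWR_TELEMETRY_LOGGING"] = "1"  # print what would have been reported

import flwr  # noqa: E402  (imported after the environment is configured)

print(flwr.__version__)
```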
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#: ../../source/tutorial-quickstart-android.rst:5 +msgid "Quickstart Android" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" +#: ../../source/tutorial-quickstart-android.rst:10 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/tutorial-quickstart-android.rst:12 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" +#: ../../source/tutorial-quickstart-fastai.rst:7 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 -msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#: ../../source/tutorial-quickstart-fastai.rst:20 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +msgid "Next, activate your environment, then run:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#: ../../source/tutorial-quickstart-fastai.rst:43 msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." 
+" Let's run the project:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" +#: ../../source/tutorial-quickstart-fastai.rst:56 +#: ../../source/tutorial-quickstart-huggingface.rst:65 +#: ../../source/tutorial-quickstart-mlx.rst:64 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56 +#: ../../source/tutorial-quickstart-pytorch.rst:64 +#: ../../source/tutorial-quickstart-tensorflow.rst:65 +msgid "With default arguments you will see an output like this one:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" +#: ../../source/tutorial-quickstart-fastai.rst:100 +#: ../../source/tutorial-quickstart-huggingface.rst:116 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106 +#: ../../source/tutorial-quickstart-pytorch.rst:105 +#: ../../source/tutorial-quickstart-tensorflow.rst:106 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" +#: ../../source/tutorial-quickstart-fastai.rst:110 +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." -msgstr "" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/tutorial-quickstart-huggingface.rst:7 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|9980b5213db547d0b8024a50992b9e3f|" +#: ../../source/tutorial-quickstart-huggingface.rst:14 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" +#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-quickstart-mlx.rst:19 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-quickstart-tensorflow.rst:20 +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#: ../../source/tutorial-quickstart-huggingface.rst:28 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." 
+#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#: ../../source/tutorial-quickstart-huggingface.rst:113 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"Financial information from different organizations to detect financial " -"fraud" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +msgid "The Data" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#: ../../source/tutorial-quickstart-huggingface.rst:132 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +msgid "The Model" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:180 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. 
More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" +#: ../../source/tutorial-quickstart-huggingface.rst:193 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/tutorial-quickstart-huggingface.rst:196 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +msgid "The ClientApp" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" +#: ../../source/tutorial-quickstart-huggingface.rst:241 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/tutorial-quickstart-huggingface.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:245 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. 
We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#: ../../source/tutorial-quickstart-huggingface.rst:269 +#: ../../source/tutorial-quickstart-pytorch.rst:261 msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" +#: ../../source/tutorial-quickstart-huggingface.rst:296 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" +#: ../../source/tutorial-quickstart-huggingface.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:321 +#: ../../source/tutorial-quickstart-tensorflow.rst:245 +msgid "The ServerApp" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/tutorial-quickstart-huggingface.rst:332 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#: ../../source/tutorial-quickstart-huggingface.rst:371 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." 
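The ClientApp and ServerApp entries above repeatedly refer to `get_weights()` and `set_weights()` helpers that convert between a PyTorch `state_dict` and a list of NumPy arrays. A sketch of how such helpers are commonly written; treat it as illustrative rather than the exact code shipped with the quickstarts.

```python
# Sketch of the weight helpers described above:
# PyTorch state_dict  <->  list of NumPy ndarrays (the format Flower serializes).
from collections import OrderedDict

import numpy as np
import torch


def get_weights(net: torch.nn.Module) -> list[np.ndarray]:
    # Extract every tensor from the model's state dict as a CPU NumPy array.
    return [val.cpu().numpy() for val in net.state_dict().values()]


def set_weights(net: torch.nn.Module, parameters: list[np.ndarray]) -> None:
    # Pair each received array with the corresponding parameter name and load it back.
    params_dict = zip(net.state_dict().keys(), parameters)
    state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
    net.load_state_dict(state_dict, strict=True)
```

As the entries note, helpers like these cover a wide range of PyTorch models (including 🤗 Transformers models, since `transformers` uses PyTorch under the hood) but may need adjusting for more exotic architectures.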
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" +#: ../../source/tutorial-quickstart-huggingface.rst:376 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +#: ../../source/tutorial-quickstart-ios.rst:5 +msgid "Quickstart iOS" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" +#: ../../source/tutorial-quickstart-ios.rst:10 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/tutorial-quickstart-ios.rst:15 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#: ../../source/tutorial-quickstart-ios.rst:21 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/tutorial-quickstart-ios.rst:36 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" +#: ../../source/tutorial-quickstart-ios.rst:72 +msgid "" +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" +#: ../../source/tutorial-quickstart-ios.rst:83 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +#: ../../source/tutorial-quickstart-ios.rst:99 msgid "" -"The server receives model updates from the selected client nodes. 
If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#: ../../source/tutorial-quickstart-ios.rst:102 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"After we have all of the necessary information, let's create our Flower " +"client." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|329fb3c04c744eda83bb51fa444c2266|" +#: ../../source/tutorial-quickstart-ios.rst:117 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:124 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-scikitlearn.rst:167 +#: ../../source/tutorial-quickstart-xgboost.rst:341 +msgid "Flower Server" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:131 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +msgid "Train the model, federated!" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-xgboost.rst:567 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. 
We therefore have to start the server first:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:152 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:10 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:16 +msgid "" +"Before we start building our JAX example, we need install the packages " +":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:24 +msgid "Linear Regression with JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:26 +msgid "" +"We begin with a brief description of the centralized training code based " +"on a :code:`Linear Regression` model. If you want a more in-depth " +"explanation of what's going on then have a look at the official `JAX " +"documentation `_." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:29 +msgid "" +"Let's create a new file called :code:`jax_training.py` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " +"be imported. In addition, we need to import :code:`sklearn` since we use " +":code:`make_regression` for the dataset and :code:`train_test_split` to " +"split the dataset into a training and test set. You can see that we do " +"not yet import the :code:`flwr` package for federated learning. This will" +" be done later." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:43 +msgid "" +"The :code:`load_data()` function loads the mentioned training and test " +"sets." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:53 +msgid "" +"The model architecture (a very simple :code:`Linear Regression` model) is" +" defined in :code:`load_model()`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:65 +msgid "" +"We now need to define the training (function :code:`train()`), which " +"loops over the training set and measures the loss (function " +":code:`loss_fn()`) for each batch of training examples. 
The loss function" +" is separate since JAX takes derivatives with a :code:`grad()` function " +"(defined in the :code:`main()` function and called in :code:`train()`)." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:83 +msgid "" +"The evaluation of the model is defined in the function " +":code:`evaluation()`. The function takes all test examples and measures " +"the loss of the linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:94 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the :code:`jax.grad()` function is defined in " +":code:`main()` and passed to :code:`train()`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:111 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:117 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:121 +msgid "JAX meets Flower" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:123 +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +":code:`jax_training.py` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server*, which averages all received " +"parameter updates. This describes one round of the federated learning " +"process, and we repeat this for multiple rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:145 +msgid "" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined JAX training in :code:`jax_training.py`. Our" +" *client* needs to import :code:`flwr`, but also :code:`jax` and " +":code:`jaxlib` to update the parameters on our JAX model:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:160 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " +"easier to implement than :code:`Client` if you use a framework with good " +"NumPy interoperability (like JAX) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
:code:`FlowerClient` needs" +" to implement four methods, two methods for getting/setting model " +"parameters, one method for training the model, and one method for testing" +" the model:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid ":code:`set_parameters (optional)`" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid "transform parameters to NumPy :code:`ndarray`'s" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:174 +msgid "get the updated local model parameters and return them to the server" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:178 +msgid "return the local loss to the server" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:180 +msgid "" +"The challenging part is to transform the JAX model parameters from " +":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" +" `NumPyClient`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:182 +msgid "" +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`evaluate()` previously " +"defined in :code:`jax_training.py`. So what we really do here is we tell " +"Flower through our :code:`NumPyClient` subclass which of our already " +"defined functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:251 +msgid "Having defined the federation process, we can run it." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:280 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:285 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:288 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "" +#: ../../source/tutorial-quickstart-mlx.rst:5 +msgid "Quickstart MLX" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:7 +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:12 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:27 +msgid "" +"Then, run the command below. 
You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:57 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:106 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:122 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:166 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:190 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:212 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:218 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:231 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:240 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:255 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:275 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:285 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:290 +msgid "Putting everything together we have:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:344 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. 
In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:378 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:402 +#: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:407 +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:7 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:12 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:27 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:121 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:159 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:184 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:236 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:294 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:323 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:365 +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 +msgid "Video tutorial" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:376 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. 
It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +msgid "" +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. 
The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid ":code:`set_initial_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "" +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +":code:`FederatedDataset.load_partition()` method loads the partitioned " +"training set for each partition ID defined in the :code:`--partition-id` " +"argument." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid ":code:`set_parameters` (optional)" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:122 +msgid "is directly imported with :code:`utils.set_model_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +msgid "set the local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid "train the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +msgid "return the updated local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid "test the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "The methods can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +msgid "" +"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:172 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "" +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy. Note that we also make use of Flower" +" Datasets here to load the test split of the MNIST dataset for server-" +"side evaluation." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +msgid "" +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. 
The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:239 +#: ../../source/tutorial-quickstart-xgboost.rst:575 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:246 +#: ../../source/tutorial-quickstart-xgboost.rst:582 +msgid "Open another terminal and start the second client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:252 +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:7 +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:28 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:118 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:147 +msgid "" +"Next, we need a model. 
We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:178 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:212 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:247 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:284 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:299 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:14 +msgid "Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:16 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:20 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:23 +msgid "Why federated XGBoost?" 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:39 +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:41 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:47 +msgid "" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:60 +msgid "" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:89 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:102 +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=30)`). Then, we load " +"the partition for the given client based on :code:`partition_id`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:121 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:134 +msgid "" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:183 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:205 +msgid "" +"All required parameters defined above are passed to :code:`XgbClient`'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:207 +msgid "" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:262 +msgid "" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. From the second round, we load the global " +"model sent from server to new build Booster object, and then update model" +" weights on local training data with function :code:`local_boost` as " +"follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:281 +msgid "" +"Given :code:`num_local_round`, we update trees by calling " +":code:`bst_input.update` method. After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:313 +msgid "" +"In :code:`evaluate`, after loading the global model, we call " +":code:`bst.eval_set` function to conduct evaluation on valid set. The AUC" +" value will be returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:316 +msgid "" +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:332 +msgid "" +"That's it for the client. We only have to implement :code:`Client` and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:343 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:346 +msgid "" +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:348 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:380 +msgid "" +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients. The :code:`config_func` " +"function is to return the current FL round number to client's " +":code:`fit()` and :code:`evaluate()` methods." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:384 +msgid "Then, we start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:396 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:398 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:400 +msgid "" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:496 +msgid "" +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:555 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:560 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:565 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:641 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:646 +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:650 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:652 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:659 +msgid "Cyclic training" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:661 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:665 +msgid "" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:705 +msgid "" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:746 +msgid "" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:749 +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:813 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:815 +msgid "" +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:846 +msgid "Customised centralised/distributed evaluation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:848 +msgid "" +"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:880 +msgid "" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:883 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:887 +msgid "Flower simulation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:888 +msgid "" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:922 +msgid "" +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:977 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:980 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1031 +msgid "" +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1051 +msgid "" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1094 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1096 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. Let's first see the sever side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1142 +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" +" will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1146 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1200 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting :code:`--centralised-eval`, as well as an option to perform " +"scaled learning rate based on the number of clients by setting :code" +":`--scaled-lr`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1204 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1282 +msgid "This integrates all arguments for both client and server sides." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1285 +msgid "Example commands" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1287 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1294 +msgid "Then, on each client terminal, we start the clients:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1300 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1306 +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +msgid "" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 +msgid "" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 +msgid "" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 +msgid "" +"This works as expected, ten clients are training for three rounds of " +"federated learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. 
" +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +msgid "`Check out Flower Code Examples `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +msgid "Let's get started! 🌼" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +msgid "Install dependencies" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +msgid "Load the data" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. 
CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +msgid "Define the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +msgid "Train the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +msgid "Update model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
+" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +msgid "Define the Flower ClientApp" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. 
Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Clone o repositório do flower." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +msgid "Run the training" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +msgid "Finally, we run the simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. 
Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. 
We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|33cacb7d985c4906b348515c1a5cd993|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|cc080a555947492fa66131dc3a967603|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|085c3e0fb8664c6aa06246636524b20b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|bfe69c74e48c45d49b50251c38c2a019|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. 
Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|163117eb654a4273babba413cf8065f5|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|f403fcd69e4e44409627e748b404c086|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|4b00fe63870145968f8443619a792a42|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|368378731066486fa4397e89bc6b870c|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a66aa83d85bf4ffba7ed660b718066da|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|82324b9af72a4582a81839d55caab767|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. 
In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." 
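A minimal, framework-agnostic sketch of the weighted averaging described in the "Step 4: Aggregate model updates" entry above (FedAvg). The function and variable names are illustrative only and are not part of the Flower API; the point is simply that weighting by the number of local examples gives every data example the same influence on the global model.

```python
import numpy as np

def fedavg(client_updates, num_examples):
    """Weighted average of client model updates, weighted by local dataset size."""
    total = sum(num_examples)
    weights = [n / total for n in num_examples]
    num_layers = len(client_updates[0])
    return [
        sum(w * update[layer] for w, update in zip(weights, client_updates))
        for layer in range(num_layers)
    ]

# Toy example: the client holding 100 examples contributes ten times the
# weight of the client holding 10 examples, so each *example* counts equally.
client_a = [np.array([1.0, 1.0])]   # trained on 10 examples
client_b = [np.array([0.0, 0.0])]   # trained on 100 examples
print(fedavg([client_a, client_b], [10, 100]))   # -> [array([0.0909..., 0.0909...])]
```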
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" + +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you want have to do it. " +#~ "Usually it should be enough to " +#~ "install Docker on your system and " +#~ "ensure its available on your command " +#~ "line. Additionally, install the `VSCode " +#~ "Containers Extension `_." +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " +#~ "(without extras)" +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" +#~ " [\"simulation\"] }`` (with extras)" +#~ msgstr "" + +#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgstr "" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.7.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" + +#~ msgid "Before the release" +#~ msgstr "" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``. This will " +#~ "create a draft release on GitHub " +#~ "containing the correct artifacts and the" +#~ " relevant part of the changelog." +#~ msgstr "" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_ and examples " +#~ "of `good first contributions " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This will create a `flower/` (or " +#~ "the name of your fork if you " +#~ "renamed it) folder in the current " +#~ "working directory." +#~ msgstr "" + +#~ msgid "Otherwise you can always find this option in the `Branches` page." 
+#~ msgstr "" + +#~ msgid "" +#~ "Once you click the `Compare & pull" +#~ " request` button, you should see " +#~ "something similar to this:" +#~ msgstr "" + +#~ msgid "Find the source file in `doc/source`" +#~ msgstr "" + +#~ msgid "" +#~ "Make the change in the `.rst` file" +#~ " (beware, the dashes under the title" +#~ " should be the same length as " +#~ "the title itself)" +#~ msgstr "" + +#~ msgid "Change the file name to `save-progress.rst`" +#~ msgstr "" + +#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgstr "" + +#~ msgid "" +#~ "This will cause a redirect from " +#~ "`saving-progress.html` to `save-progress.html`," +#~ " old links will continue to work." +#~ msgstr "" + +#~ msgid "" +#~ "For the lateral navigation bar to " +#~ "work properly, it is very important " +#~ "to update the `index.rst` file as " +#~ "well. This is where we define the" +#~ " whole arborescence of the navbar." +#~ msgstr "" + +#~ msgid "Find and modify the file name in `index.rst`" +#~ msgstr "" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" + +#~ msgid "`Python 3.7 `_ or above" +#~ msgstr "" + +#~ msgid "" +#~ "First, clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, you can use the following " +#~ "script that will install pyenv, set " +#~ "it up and create the virtual " +#~ "environment (with :code:`Python 3.8.17` by " +#~ "default)::" +#~ msgstr "" + +#~ msgid "" +#~ "Third, install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "_`. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental, the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. Therefore, we use " +#~ "an adaptive approach [andrew]_ that " +#~ "continuously adjusts the clipping threshold" +#~ " to track a prespecified quantile of" +#~ " the update norm distribution." +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realises the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." 
+#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan, H. Brendan, et al. \"Learning" +#~ " differentially private recurrent language " +#~ "models.\" arXiv preprint arXiv:1710.06963 " +#~ "(2017)." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "private learning with adaptive clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems 34 (2021): 17455-17466." +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verfiy if Flower was successfully " +#~ "installed. 
If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "" + +#~ msgid "start_client" +#~ msgstr "" + +#~ msgid "start_numpy_client" +#~ msgstr "" + +#~ msgid "start_simulation" +#~ msgstr "" + +#~ msgid "server.start_server" +#~ msgstr "" + +#~ msgid "server.strategy" +#~ msgstr "" + +#~ msgid "server.strategy.Strategy" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "" + +#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.Krum" +#~ msgstr "" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgstr "" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" + +#~ msgid "" +#~ "Using the `client_fn`, Flower clients " +#~ "can interchangeably run as standalone " +#~ "processes (i.e. via `start_client`) or " +#~ "in simulation (i.e. via `start_simulation`)" +#~ " without requiring changes to how the" +#~ " client class is defined and " +#~ "instantiated. Calling `start_numpy_client` is " +#~ "now deprecated." +#~ msgstr "" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384)), " +#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to baselines** " +#~ "([#2301](https://github.com/adap/flower/pull/2301), " +#~ "[#2305](https://github.com/adap/flower/pull/2305), " +#~ "[#2307](https://github.com/adap/flower/pull/2307), " +#~ "[#2327](https://github.com/adap/flower/pull/2327), " +#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to the simulation " +#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " +#~ "[#2447](https://github.com/adap/flower/pull/2447), " +#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgstr "" + +#~ msgid "" +#~ "**General improvements** " +#~ "([#2309](https://github.com/adap/flower/pull/2309), " +#~ "[#2310](https://github.com/adap/flower/pull/2310), " +#~ "[2313](https://github.com/adap/flower/pull/2313), " +#~ "[#2316](https://github.com/adap/flower/pull/2316), " +#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +#~ " [#2360](https://github.com/adap/flower/pull/2360), " +#~ "[#2402](https://github.com/adap/flower/pull/2402), " +#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgstr "" + +#~ msgid "" +#~ "`flower-superlink --driver-api-address " +#~ "\"0.0.0.0:8081\" --fleet-api-address " +#~ "\"0.0.0.0:8086\"`" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. 
We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we install the necessary packages" +#~ " for PyTorch (``torch`` and " +#~ "``torchvision``) and Flower (``flwr``):" +#~ msgstr "" + +#~ msgid "" +#~ "Federated learning can be applied to " +#~ "many different types of tasks across " +#~ "different domains. In this tutorial, we" +#~ " introduce federated learning by training" +#~ " a simple convolutional neural network " +#~ "(CNN) on the popular CIFAR-10 dataset." +#~ " CIFAR-10 can be used to train " +#~ "image classifiers that distinguish between " +#~ "images from ten different classes:" +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." 
+#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" + +#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgstr "" + +#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgstr "" + +#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgstr "" + +#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgstr "" + +#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgstr "" + +#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgstr "" + +#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgstr "" + +#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgstr "" + +#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgstr "" + +#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgstr "" + +#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgstr "" + +#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgstr "" + +#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgstr "" + +#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgstr "" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "" +#~ "Source: `Official VSCode documentation " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Developing inside a Container " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Remote development in Containers " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "If you are not familiar with " +#~ "Flower Baselines, you should probably " +#~ "check-out our `contributing guide for " +#~ "baselines `_." +#~ msgstr "" + +#~ msgid "" +#~ "You should then check out the open" +#~ " `issues " +#~ "`_" +#~ " for baseline requests. 
If you find" +#~ " a baseline that you'd like to " +#~ "work on and that has no assignes," +#~ " feel free to assign it to " +#~ "yourself and start working on it!" +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_." +#~ msgstr "" + +#~ msgid "" +#~ "Git is a distributed version control " +#~ "tool. This allows for an entire " +#~ "codebase's history to be stored and " +#~ "every developer's machine. It is a " +#~ "software that will need to be " +#~ "installed on your local machine, you " +#~ "can follow this `guide " +#~ "`_ to set it up." +#~ msgstr "" + +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "https://github.com/adap/flower (while connected to" +#~ " your GitHub account) and click the" +#~ " ``Fork`` button situated on the top" +#~ " right of the page." +#~ msgstr "" + +#~ msgid "" +#~ "Now we will add an upstream " +#~ "address to our repository. Still in " +#~ "the same directroy, we must run " +#~ "the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by following " +#~ "this `getting started guide for " +#~ "contributors`_ (note that you won't need" +#~ " to clone the repository). Once you" +#~ " are able to write code and " +#~ "test it, you can finally start " +#~ "making changes!" +#~ msgstr "" + +#~ msgid "" +#~ "For our documentation, we’ve started to" +#~ " use the `Diàtaxis framework " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Our “How to” guides should have " +#~ "titles that continue the sencence “How" +#~ " to …”, for example, “How to " +#~ "upgrade to Flower 1.0”." +#~ msgstr "" + +#~ msgid "" +#~ "This issue is about changing the " +#~ "title of a doc from present " +#~ "continious to present simple." +#~ msgstr "" + +#~ msgid "" +#~ "Let's take the example of “Saving " +#~ "Progress” which we changed to “Save " +#~ "Progress”. Does this pass our check?" +#~ msgstr "" + +#~ msgid "Before: ”How to saving progress” ❌" +#~ msgstr "" + +#~ msgid "After: ”How to save progress” ✅" +#~ msgstr "" + +#~ msgid "" +#~ "This is a tiny change, but it’ll" +#~ " allow us to test your end-" +#~ "to-end setup. After cloning and " +#~ "setting up the Flower repo, here’s " +#~ "what you should do:" +#~ msgstr "" + +#~ msgid "" +#~ "Build the docs and check the " +#~ "result: ``_" +#~ msgstr "" + +#~ msgid "Here’s how to change the file name:" +#~ msgstr "" + +#~ msgid "" +#~ "Commit the changes (commit messages are" +#~ " always imperative: “Do something”, in " +#~ "this case “Change …”)" +#~ msgstr "" + +#~ msgid "" +#~ "`Good first contributions " +#~ "`_, where you should" +#~ " particularly look into the " +#~ ":code:`baselines` contributions." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload with `FedBN `_, a federated training strategy" +#~ " designed for non-iid data. We " +#~ "are using PyTorch to train a " +#~ "Convolutional Neural Network(with Batch " +#~ "Normalization layers) on the CIFAR-10 " +#~ "dataset. When applying FedBN, only few" +#~ " changes needed compared to `Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `_." +#~ msgstr "" + +#~ msgid "" +#~ "All files are revised based on " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. The " +#~ "only thing to do is modifying the" +#~ " file called :code:`cifar.py`, revised part" +#~ " is shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the sytstem consists of one " +#~ "server and two clients." +#~ msgstr "" + +#~ msgid "" +#~ "If you have read `Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`_, the following" +#~ " parts are easy to follow, onyl " +#~ ":code:`get_parameters` and :code:`set_parameters` " +#~ "function in :code:`client.py` needed to " +#~ "revise. If not, please read the " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. first." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. 
Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" + +#~ msgid "A Closer Look" +#~ msgstr "" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. 
You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" + +#~ msgid "Give It a Try" +#~ msgstr "" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "" + +#~ msgid "Differential privacy" +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" + +#~ msgid "DP-FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." 
+#~ msgstr "" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." +#~ msgstr "" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" + +#~ msgid "Wrapper-based approach" +#~ msgstr "" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. 
Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" + +#~ msgid "Server-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." 
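The wrapper-based server-side logic described in the preceding entries can be pictured with the schematic below. This is not the actual ``DPFedAvgFixed`` code: the class name and method signatures are simplified stand-ins, it assumes the wrapped strategy's ``configure_fit()`` returns (client, fit instructions) pairs whose ``config`` is a plain dict, and in this sketch the noise scale is sent to clients only when noising is done client-side (when the server noises, it perturbs the received updates itself in ``aggregate_fit()``, as described above). The per-client scale of sigma_delta / sqrt(m) works because the sum of m independent Gaussian noises of that scale has variance m * sigma_delta^2 / m = sigma_delta^2, matching explicit server-side noising of the aggregate.

```python
import math

class DPFixedClippingWrapper:
    """Schematic wrapper around a strategy: augment each client's fit config
    with the DP hyperparameters before the instructions are sent out."""

    def __init__(self, strategy, clip_norm, noise_multiplier,
                 num_sampled_clients, server_side_noising=True):
        self.strategy = strategy
        self.clip_norm = clip_norm
        self.noise_multiplier = noise_multiplier
        self.num_sampled_clients = num_sampled_clients
        self.server_side_noising = server_side_noising

    def configure_fit(self, server_round, parameters, client_manager):
        # Delegate to the wrapped strategy, then post-process its instructions.
        instructions = self.strategy.configure_fit(
            server_round, parameters, client_manager
        )
        for _client, fit_ins in instructions:   # fit_ins.config assumed to be a dict
            fit_ins.config["dpfedavg_clip_norm"] = self.clip_norm
            if not self.server_side_noising:
                # Each client adds noise of scale sigma_delta / sqrt(m), assuming
                # sigma_delta = noise_multiplier * clip_norm for the summed updates.
                fit_ins.config["dpfedavg_noise_stddev"] = (
                    self.noise_multiplier * self.clip_norm
                    / math.sqrt(self.num_sampled_clients)
                )
        return instructions
```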
+#~ msgstr "" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." +#~ msgstr "" + +#~ msgid "Client-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan et al. \"Learning Differentially " +#~ "Private Recurrent Language Models.\" " +#~ "International Conference on Learning " +#~ "Representations (ICLR), 2017." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. 
\"Differentially " +#~ "Private Learning with Adaptive Clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems (NeurIPS), 2021." +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by `implementing" +#~ " a custom strategy from scratch " +#~ "`_. Here's a nonsensical " +#~ "example that customizes :code:`FedAvg` by " +#~ "adding a custom ``\"hello\": \"world\"`` " +#~ "configuration key/value pair to the " +#~ "config dict of a *single client* " +#~ "(only the first client in the " +#~ "list, the other clients in this " +#~ "round to not receive this \"special\"" +#~ " config value):" +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_evaluate`." +#~ msgstr "" + +#~ msgid "" +#~ "`How to run Flower using Docker " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Dashboard: ``_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Metrics: ``_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." +#~ msgstr "" + +#~ msgid "driver" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. 
their" +#~ " `fit()` method)." +#~ msgstr "" + +#~ msgid "" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." +#~ msgstr "" + +#~ msgid "" +#~ "The first preview release of Flower " +#~ "Baselines has arrived! We're kickstarting " +#~ "Flower Baselines with implementations of " +#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," +#~ " and FedAvgM. Check the documentation " +#~ "on how to use [Flower " +#~ "Baselines](https://flower.ai/docs/using-baselines.html). " +#~ "With this first preview release we're" +#~ " also inviting the community to " +#~ "[contribute their own " +#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" + +#~ msgid "The following examples are available as standalone projects." +#~ msgstr "" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart TensorFlow (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart PyTorch (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`PyTorch: From Centralized To Federated " +#~ "(Tutorial) `_" +#~ msgstr "" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" + +#~ msgid "Extra Dependencies" +#~ msgstr "" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" + +#~ msgid "For PyTorch examples::" +#~ msgstr "" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" + +#~ msgid "PyTorch Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" + +#~ msgid "First, start a Flower server:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." 
+#~ msgstr "" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "" + +#~ msgid "TensorFlow Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" + +#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgstr "" + +#~ msgid "" +#~ "`Flower meets KOSMoS `_." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the full code example: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." +#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this `virtualenv `_." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a `virtualenv " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgstr "" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML `_, a popular" +#~ " image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The utility :code:`utils.load_mnist()` downloads " +#~ "the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" + +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "" +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." -msgstr "" +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." -msgstr "" +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" -msgstr "" +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. 
In fact, federated evaluation is an integral part of " -"most federated learning systems." -msgstr "" +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "" +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "" +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." -msgstr "" +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "" +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" -msgstr "" +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "" +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "" +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." 
-msgstr "" +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "``BASE_IMAGE_TAG``" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" +#~ msgid "The image tag of the base image." +#~ msgstr "A tag da imagem da imagem base." #~ msgid "" -#~ "Configuring and setting up the " -#~ ":code:`Dockerfile` as well the configuration" -#~ " for the devcontainer can be a " -#~ "bit more involved. The good thing " -#~ "is you want have to do it. " -#~ "Usually it should be enough to " -#~ "install Docker on your system and " -#~ "ensure its available on your command " -#~ "line. Additionally, install the `VSCode " -#~ "Containers Extension `_." +#~ "Open the notebook ``doc/source/tutorial-" +#~ "get-started-with-flower-pytorch.ipynb``:" #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " -#~ "(without extras)" +#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +#~ "/tutorial-get-started-with-flower-" +#~ "pytorch.ipynb" #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" -#~ " [\"simulation\"] }`` (with extras)" +#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" +#~ "name/doc/source/tutorial-get-started-with-" +#~ "flower-pytorch.ipynb" #~ msgstr "" -#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgid "Virutualenv with Pyenv/Virtualenv" #~ msgstr "" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.7.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." #~ msgstr "" -#~ msgid "Before the release" +#~ msgid "Open a PR (as shown above)" +#~ msgstr "" + +#~ msgid "How to write a good PR title" #~ msgstr "" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" #~ msgstr "" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." #~ msgstr "" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. 
This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "" + +#~ msgid "Implement Algorithm" +#~ msgstr "" + +#~ msgid "Database" +#~ msgstr "" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "" + +#~ msgid "Improve code in module" +#~ msgstr "" + +#~ msgid "Change SomeModule" +#~ msgstr "" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" #~ msgstr "" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" #~ msgstr "" #~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``. This will " -#~ "create a draft release on GitHub " -#~ "containing the correct artifacts and the" -#~ " relevant part of the changelog." +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" #~ msgstr "" -#~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ msgid "Changelog entry" #~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_ and examples " -#~ "of `good first contributions " -#~ "`_." +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." #~ msgstr "" #~ msgid "" -#~ "This will create a `flower/` (or " -#~ "the name of your fork if you " -#~ "renamed it) folder in the current " -#~ "working directory." +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" #~ msgstr "" -#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." #~ msgstr "" #~ msgid "" -#~ "Once you click the `Compare & pull" -#~ " request` button, you should see " -#~ "something similar to this:" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." 
#~ msgstr "" -#~ msgid "Find the source file in `doc/source`" +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." #~ msgstr "" #~ msgid "" -#~ "Make the change in the `.rst` file" -#~ " (beware, the dashes under the title" -#~ " should be the same length as " -#~ "the title itself)" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" #~ msgstr "" -#~ msgid "Change the file name to `save-progress.rst`" +#~ msgid " is for classifying a PR as a general improvement." #~ msgstr "" -#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgid " is to not add the PR to the changelog" #~ msgstr "" -#~ msgid "" -#~ "This will cause a redirect from " -#~ "`saving-progress.html` to `save-progress.html`," -#~ " old links will continue to work." +#~ msgid " is to add a general baselines change to the PR" #~ msgstr "" -#~ msgid "" -#~ "For the lateral navigation bar to " -#~ "work properly, it is very important " -#~ "to update the `index.rst` file as " -#~ "well. This is where we define the" -#~ " whole arborescence of the navbar." +#~ msgid " is to add a general examples change to the PR" #~ msgstr "" -#~ msgid "Find and modify the file name in `index.rst`" +#~ msgid " is to add a general sdk change to the PR" #~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid " is to add a general simulations change to the PR" #~ msgstr "" -#~ msgid "`Python 3.7 `_ or above" +#~ msgid "Note that only one token should be used." #~ msgstr "" #~ msgid "" -#~ "First, clone the `Flower repository " -#~ "`_ from GitHub::" +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" #~ msgstr "" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" #~ msgstr "" #~ msgid "" -#~ "If you don't have :code:`pyenv` " -#~ "installed, you can use the following " -#~ "script that will install pyenv, set " -#~ "it up and create the virtual " -#~ "environment (with :code:`Python 3.8.17` by " -#~ "default)::" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" #~ msgstr "" #~ msgid "" -#~ "Third, install the Flower package in " -#~ "development mode (think :code:`pip install " -#~ "-e`) along with all necessary " -#~ "dependencies::" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." #~ msgstr "" #~ msgid "" -#~ "Developers could run the full set " -#~ "of Github Actions workflows under their" -#~ " local environment by using `Act " -#~ "_`. 
Please refer to" -#~ " the installation instructions under the" -#~ " linked repository and run the next" -#~ " command under Flower main cloned " -#~ "repository folder::" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" #~ msgstr "" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental, the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. Therefore, we use " -#~ "an adaptive approach [andrew]_ that " -#~ "continuously adjusts the clipping threshold" -#~ " to track a prespecified quantile of" -#~ " the update norm distribution." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realises the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" #~ msgstr "" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." +#~ msgstr "" + +#~ msgid "Example: MXNet - Run MXNet Federated" #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. 
On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." +#~ msgstr "" + +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "" + +#~ msgid "MNIST Training with MXNet" #~ msgstr "" #~ msgid "" -#~ "McMahan, H. Brendan, et al. \"Learning" -#~ " differentially private recurrent language " -#~ "models.\" arXiv preprint arXiv:1710.06963 " -#~ "(2017)." +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "private learning with adaptive clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems 34 (2021): 17455-17466." +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." #~ msgstr "" #~ msgid "" -#~ "The following command can be used " -#~ "to verfiy if Flower was successfully " -#~ "installed. If everything worked, it " -#~ "should print the version of Flower " -#~ "to the command line::" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." #~ msgstr "" -#~ msgid "flwr (Python API reference)" +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." #~ msgstr "" -#~ msgid "start_client" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." 
#~ msgstr "" -#~ msgid "start_numpy_client" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." #~ msgstr "" -#~ msgid "start_simulation" +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." #~ msgstr "" -#~ msgid "server.start_server" +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" #~ msgstr "" -#~ msgid "server.strategy" +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." #~ msgstr "" -#~ msgid "server.strategy.Strategy" +#~ msgid "MXNet meets Flower" #~ msgstr "" -#~ msgid "server.strategy.FedAvg" +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." #~ msgstr "" -#~ msgid "server.strategy.FedAvgM" +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." #~ msgstr "" -#~ msgid "server.strategy.FedMedian" +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" #~ msgstr "" -#~ msgid "server.strategy.QFedAvg" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. 
:code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" #~ msgstr "" -#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" #~ msgstr "" -#~ msgid "server.strategy.FedOpt" +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." #~ msgstr "" -#~ msgid "server.strategy.FedProx" +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" -#~ msgid "server.strategy.FedAdagrad" +#~ msgid "" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." #~ msgstr "" -#~ msgid "server.strategy.FedAdam" +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" #~ msgstr "" -#~ msgid "server.strategy.FedYogi" +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" #~ msgstr "" -#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgid "" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server can " +#~ "be started and how a Flower client" +#~ " can establish a secure connections " +#~ "to it." #~ msgstr "" -#~ msgid "server.strategy.Krum" +#~ msgid "" +#~ "The code example comes with a " +#~ "README.md file which will explain how" +#~ " to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how. Stick to " +#~ "this guide for a deeper introduction " +#~ "to the topic." #~ msgstr "" -#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgid "" +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh`" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgid "with the following command sequence:" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgid "" +#~ "The approach how the SSL certificates" +#~ " are generated in this example can" +#~ " serve as an inspiration and starting" +#~ " point but should not be taken " +#~ "as complete for production environments. 
" +#~ "Please refer to other sources regarding" +#~ " the issue of correctly generating " +#~ "certificates for production environments." #~ msgstr "" #~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." #~ msgstr "" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." #~ msgstr "" #~ msgid "" -#~ "Using the `client_fn`, Flower clients " -#~ "can interchangeably run as standalone " -#~ "processes (i.e. via `start_client`) or " -#~ "in simulation (i.e. via `start_simulation`)" -#~ " without requiring changes to how the" -#~ " client class is defined and " -#~ "instantiated. Calling `start_numpy_client` is " -#~ "now deprecated." +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." #~ msgstr "" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384)), " -#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ "We are now going to show how " +#~ "to write a client which uses the" +#~ " previously generated scripts:" #~ msgstr "" #~ msgid "" -#~ "**General updates to baselines** " -#~ "([#2301](https://github.com/adap/flower/pull/2301), " -#~ "[#2305](https://github.com/adap/flower/pull/2305), " -#~ "[#2307](https://github.com/adap/flower/pull/2307), " -#~ "[#2327](https://github.com/adap/flower/pull/2327), " -#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ "When setting :code:`root_certificates`, the " +#~ "client expects the PEM-encoded root " +#~ "certificates as a byte string. We " +#~ "are again using :code:`Path` to simplify" +#~ " reading those as byte strings." #~ msgstr "" #~ msgid "" -#~ "**General updates to the simulation " -#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " -#~ "[#2447](https://github.com/adap/flower/pull/2447), " -#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ "You should now have learned how to" +#~ " generate self-signed certificates using" +#~ " the given script, start a SSL-" +#~ "enabled server, and have a client " +#~ "establish a secure connection to it." #~ msgstr "" #~ msgid "" -#~ "**General improvements** " -#~ "([#2309](https://github.com/adap/flower/pull/2309), " -#~ "[#2310](https://github.com/adap/flower/pull/2310), " -#~ "[2313](https://github.com/adap/flower/pull/2313), " -#~ "[#2316](https://github.com/adap/flower/pull/2316), " -#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -#~ " [#2360](https://github.com/adap/flower/pull/2360), " -#~ "[#2402](https://github.com/adap/flower/pull/2402), " -#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "Flower server" #~ msgstr "" #~ msgid "" -#~ "`flower-superlink --driver-api-address " -#~ "\"0.0.0.0:8081\" --fleet-api-address " -#~ "\"0.0.0.0:8086\"`" +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." #~ msgstr "" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower " -#~ "and PyTorch. In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." 
+#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" #~ msgid "" -#~ "Next, we install the necessary packages" -#~ " for PyTorch (``torch`` and " -#~ "``torchvision``) and Flower (``flwr``):" +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." #~ msgstr "" #~ msgid "" -#~ "Federated learning can be applied to " -#~ "many different types of tasks across " -#~ "different domains. In this tutorial, we" -#~ " introduce federated learning by training" -#~ " a simple convolutional neural network " -#~ "(CNN) on the popular CIFAR-10 dataset." -#~ " CIFAR-10 can be used to train " -#~ "image classifiers that distinguish between " -#~ "images from ten different classes:" +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." #~ msgstr "" #~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." -#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server:" +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." #~ msgstr "" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." #~ msgstr "" -#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgid "Using a different Flower or Python version" #~ msgstr "" -#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." 
#~ msgstr "" -#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" #~ msgstr "" -#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgid "Next, we can pin the hash when running a new server container:" #~ msgstr "" -#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgid "" +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " +#~ "` | :doc" +#~ ":`scikit-learn `" +#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" #~ msgstr "" -#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgid "flower-driver-api" #~ msgstr "" -#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgid "flower-fleet-api" #~ msgstr "" -#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" #~ msgstr "" -#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgid "" +#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" #~ msgstr "" -#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgid "" +#~ ":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" #~ msgstr "" -#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" #~ msgstr "" -#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgid "Run Flower server (Driver API and Fleet API)." #~ msgstr "" -#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgid "Start a Flower Driver API server." #~ msgstr "" #~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "Since `Flower 1.5 `_ we have " -#~ "introduced translations to our doc " -#~ "pages, but, as you might have " -#~ "noticed, the translations are often " -#~ "imperfect. If you speak languages other" -#~ " than English, you might be able " -#~ "to help us in our effort to " -#~ "make Federated Learning accessible to as" -#~ " many people as possible by " -#~ "contributing to those translations! This " -#~ "might also be a great opportunity " -#~ "for those wanting to become open " -#~ "source contributors with little prerequistes." +#~ msgid "`Driver` class provides an interface to the Driver API." 
#~ msgstr "" #~ msgid "" -#~ "You input your translation in the " -#~ "textbox at the top and then, once" -#~ " you are happy with it, you " -#~ "either press ``Save and continue`` (to" -#~ " save the translation and go to " -#~ "the next untranslated string), ``Save " -#~ "and stay`` (to save the translation " -#~ "and stay on the same page), " -#~ "``Suggest`` (to add your translation to" -#~ " suggestions for other users to " -#~ "view), or ``Skip`` (to go to the" -#~ " next untranslated string without saving" -#~ " anything)." +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." #~ msgstr "" -#~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ msgid ":py:obj:`close `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Source: `Official VSCode documentation " -#~ "`_" +#~ msgid "Disconnect from the SuperLink if connected." #~ msgstr "" #~ msgid "" -#~ "`Developing inside a Container " -#~ "`_" +#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" #~ msgstr "" #~ msgid "" -#~ "`Remote development in Containers " -#~ "`_" +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." #~ msgstr "" -#~ msgid "" -#~ "If you are not familiar with " -#~ "Flower Baselines, you should probably " -#~ "check-out our `contributing guide for " -#~ "baselines `_." +#~ msgid "start\\_driver" #~ msgstr "" #~ msgid "" -#~ "You should then check out the open" -#~ " `issues " -#~ "`_" -#~ " for baseline requests. If you find" -#~ " a baseline that you'd like to " -#~ "work on and that has no assignes," -#~ " feel free to assign it to " -#~ "yourself and start working on it!" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." #~ msgstr "" #~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_." +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." #~ msgstr "" #~ msgid "" -#~ "Git is a distributed version control " -#~ "tool. This allows for an entire " -#~ "codebase's history to be stored and " -#~ "every developer's machine. It is a " -#~ "software that will need to be " -#~ "installed on your local machine, you " -#~ "can follow this `guide " -#~ "`_ to set it up." +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." #~ msgstr "" -#~ msgid "" -#~ "A fork is a personal copy of " -#~ "a GitHub repository. To create one " -#~ "for Flower, you must navigate to " -#~ "https://github.com/adap/flower (while connected to" -#~ " your GitHub account) and click the" -#~ " ``Fork`` button situated on the top" -#~ " right of the page." +#~ msgid "The Driver object to use." #~ msgstr "" -#~ msgid "" -#~ "Now we will add an upstream " -#~ "address to our repository. 
Still in " -#~ "the same directroy, we must run " -#~ "the following command:" +#~ msgid "Starting a driver that connects to an insecure server:" #~ msgstr "" -#~ msgid "" -#~ "This can be achieved by following " -#~ "this `getting started guide for " -#~ "contributors`_ (note that you won't need" -#~ " to clone the repository). Once you" -#~ " are able to write code and " -#~ "test it, you can finally start " -#~ "making changes!" +#~ msgid "Starting a driver that connects to an SSL-enabled server:" #~ msgstr "" #~ msgid "" -#~ "For our documentation, we’ve started to" -#~ " use the `Diàtaxis framework " -#~ "`_." +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Our “How to” guides should have " -#~ "titles that continue the sencence “How" -#~ " to …”, for example, “How to " -#~ "upgrade to Flower 1.0”." +#~ msgid "Run Simulation Engine from the CLI." #~ msgstr "" -#~ msgid "" -#~ "This issue is about changing the " -#~ "title of a doc from present " -#~ "continious to present simple." +#~ msgid "run\\_simulation\\_from\\_cli" #~ msgstr "" #~ msgid "" -#~ "Let's take the example of “Saving " -#~ "Progress” which we changed to “Save " -#~ "Progress”. Does this pass our check?" -#~ msgstr "" - -#~ msgid "Before: ”How to saving progress” ❌" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." #~ msgstr "" -#~ msgid "After: ”How to save progress” ✅" +#~ msgid "Quickstart MXNet" #~ msgstr "" #~ msgid "" -#~ "This is a tiny change, but it’ll" -#~ " allow us to test your end-" -#~ "to-end setup. After cloning and " -#~ "setting up the Flower repo, here’s " -#~ "what you should do:" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." #~ msgstr "" #~ msgid "" -#~ "Build the docs and check the " -#~ "result: ``_" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." #~ msgstr "" -#~ msgid "Here’s how to change the file name:" +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" #~ msgstr "" #~ msgid "" -#~ "Commit the changes (commit messages are" -#~ " always imperative: “Do something”, in " -#~ "this case “Change …”)" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "`Good first contributions " -#~ "`_, where you should" -#~ " particularly look into the " -#~ ":code:`baselines` contributions." +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "" + +#~ msgid "In addition, define the device allocation in MXNet with:" #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. 
" +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." #~ msgstr "" #~ msgid "" -#~ "Flower uses :code:`pyproject.toml` to manage" -#~ " dependencies and configure development " -#~ "tools (the ones which support it). " -#~ "Poetry is a build tool which " -#~ "supports `PEP 517 " -#~ "`_." +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." #~ msgstr "" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing machine learning" -#~ " workload with `FedBN `_, a federated training strategy" -#~ " designed for non-iid data. We " -#~ "are using PyTorch to train a " -#~ "Convolutional Neural Network(with Batch " -#~ "Normalization layers) on the CIFAR-10 " -#~ "dataset. When applying FedBN, only few" -#~ " changes needed compared to `Example: " -#~ "PyTorch - From Centralized To Federated" -#~ " `_." +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." #~ msgstr "" #~ msgid "" -#~ "All files are revised based on " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. The " -#~ "only thing to do is modifying the" -#~ " file called :code:`cifar.py`, revised part" -#~ " is shown below:" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "" + +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" #~ msgstr "" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used PyTorch " -#~ "before. Let's take the next step " -#~ "and use what we've built to create" -#~ " a federated learning system within " -#~ "FedBN, the sytstem consists of one " -#~ "server and two clients." +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." #~ msgstr "" #~ msgid "" -#~ "If you have read `Example: PyTorch " -#~ "- From Centralized To Federated " -#~ "`_, the following" -#~ " parts are easy to follow, onyl " -#~ ":code:`get_parameters` and :code:`set_parameters` " -#~ "function in :code:`client.py` needed to " -#~ "revise. If not, please read the " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. first." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgid "They can be implemented in the following way:" #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." 
+#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." #~ msgstr "" -#~ msgid "Ready... Set... Train!" +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" #~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." #~ msgstr "" -#~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" +#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" #~ msgstr "" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgid ":code:`load_mnist()`" #~ msgstr "" -#~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." +#~ msgid "Loads the MNIST dataset using OpenML" #~ msgstr "" -#~ msgid "Now, let's see what is really happening inside." +#~ msgid ":code:`shuffle()`" #~ msgstr "" -#~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" +#~ msgid "Shuffles data and its label" #~ msgstr "" -#~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." 
+#~ msgid ":code:`partition()`" +#~ msgstr "" + +#~ msgid "Splits datasets into a number of partitions" #~ msgstr "" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy." #~ msgstr "" -#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgid "Let's get stated!" #~ msgstr "" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. Each ``trainloader``/``valloader`` " +#~ "pair contains 4500 training examples and" +#~ " 500 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." #~ msgstr "" -#~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" #~ msgstr "" -#~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." 
+#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" #~ msgstr "" -#~ msgid "A Closer Look" +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" #~ msgstr "" -#~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" #~ msgstr "" -#~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" #~ msgstr "" -#~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" #~ msgstr "" -#~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ msgid "|7f0ee162da38450788493a21627306f7|" #~ msgstr "" -#~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" #~ msgstr "" -#~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgstr "" + +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" #~ msgstr "" -#~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" #~ msgstr "" -#~ msgid "Give It a Try" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" #~ msgstr "" -#~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" #~ msgstr "" -#~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" #~ msgstr "" #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." #~ msgstr "" +#~ "Atualmente, Flower fornece duas imagens, " +#~ "uma imagem base e uma imagem de" +#~ " servidor. 
Também haverá uma imagem " +#~ "de cliente em breve. A imagem " +#~ "base, como o nome sugere, contém " +#~ "dependências básicas que tanto o " +#~ "servidor quanto o cliente precisam. Isso" +#~ " inclui dependências do sistema, Python " +#~ "e ferramentas Python. A imagem do " +#~ "servidor é baseada na imagem base, " +#~ "mas também instala o servidor Flower " +#~ "usando ``pip```." -#~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" +#~ msgid "``3.11``" +#~ msgstr "``3.11``" + +#~ msgid "Defaults to ``22.04``." +#~ msgstr "Como padrão ``22.04``." + +#~ msgid "Building the SuperLink image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "Pré-definido para ``flwr/server``." + +#~ msgid "The Python version of the base image." +#~ msgstr "O nome do repositório da imagem base." + +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "Como padrão ``22.04``." + +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "Pré-definido para ``py3.11-ubuntu22.04``." + +#~ msgid "The PyPI package to install." #~ msgstr "" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "Pré-definido para ``flwr/server``." + #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." #~ msgstr "" +#~ "O nome da imagem é ``flwr_server`` " +#~ "e a tag ``0.1.0``. Lembre-se que" +#~ " os argumentos de compilação, bem " +#~ "como o nome e a tag podem " +#~ "ser adaptados às suas necessidades. " +#~ "Esses valores servem apenas como " +#~ "exemplos." -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "" +#~ msgid "Creating New Messages" +#~ msgstr "Criando novas mensagens" -#~ msgid "Differential privacy" +#~ msgid "" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." #~ msgstr "" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." #~ msgstr "" -#~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ msgid "Server's side:" +#~ msgstr "" + +#~ msgid "Client's side:" #~ msgstr "" #~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" #~ msgstr "" -#~ msgid "DP-FedAvg" +#~ msgid "Message Types for Protocol Buffers" #~ msgstr "" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. 
[andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." #~ msgstr "" -#~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." +#~ msgid "Within the :code:`ServerMessage` block:" #~ msgstr "" -#~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." +#~ msgid "Within the ClientMessage block:" #~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." #~ msgstr "" -#~ msgid "Simplifying Assumptions" +#~ msgid "Once that is done, we will compile the file with:" #~ msgstr "" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ msgid "If it compiles successfully, you should see the following message:" #~ msgstr "" -#~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." +#~ msgid "Serialization and Deserialization Functions" #~ msgstr "" #~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." #~ msgstr "" -#~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." +#~ msgid "The four functions:" #~ msgstr "" -#~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ msgid "Sending the Message from the Server" #~ msgstr "" #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." 
+#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" #~ msgstr "" -#~ msgid "Customizable Responsibility for Noise injection" +#~ msgid "Receiving the Message by the Client" #~ msgstr "" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" #~ msgstr "" -#~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgid "Within the handle function:" #~ msgstr "" -#~ msgid "Wrapper-based approach" +#~ msgid "And add a new function:" #~ msgstr "" -#~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." +#~ msgid "Hopefully, when you run your program you will get the intended result!" #~ msgstr "" -#~ msgid "Server-side logic" +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." #~ msgstr "" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. 
The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." #~ msgstr "" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." 
+#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" #~ msgstr "" #~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." #~ msgstr "" #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." #~ msgstr "" #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." 
#~ msgstr "" -#~ msgid "Client-side logic" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ msgid "Run Flower server (Driver API)." #~ msgstr "" -#~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ msgid "Run Flower server (Fleet API)." #~ msgstr "" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgid "Unreleased" +#~ msgstr "" + +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" #~ msgstr "" -#~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" #~ msgstr "" -#~ msgid "" -#~ "McMahan et al. \"Learning Differentially " -#~ "Private Recurrent Language Models.\" " -#~ "International Conference on Learning " -#~ "Representations (ICLR), 2017." +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" #~ msgstr "" -#~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "Private Learning with Adaptive Clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems (NeurIPS), 2021." +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" #~ msgstr "" -#~ msgid "" -#~ "This can be achieved by customizing " -#~ "an existing strategy or by `implementing" -#~ " a custom strategy from scratch " -#~ "`_. Here's a nonsensical " -#~ "example that customizes :code:`FedAvg` by " -#~ "adding a custom ``\"hello\": \"world\"`` " -#~ "configuration key/value pair to the " -#~ "config dict of a *single client* " -#~ "(only the first client in the " -#~ "list, the other clients in this " -#~ "round to not receive this \"special\"" -#~ " config value):" +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" #~ msgstr "" -#~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_fit` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_fit`." +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" #~ msgstr "" -#~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_evaluate` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_evaluate`." 
+#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" #~ msgstr "" -#~ msgid "" -#~ "`How to run Flower using Docker " -#~ "`_" +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" #~ msgstr "" -#~ msgid "" -#~ "Ray Dashboard: ``_" +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" #~ msgstr "" -#~ msgid "" -#~ "Ray Metrics: ``_" +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" #~ msgstr "" -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" #~ msgstr "" -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" #~ msgstr "" -#~ msgid "Flower driver SDK." +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" #~ msgstr "" -#~ msgid "driver" +#~ msgid "Edge Client Engine" +#~ msgstr "Engine do Edge Client" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" #~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Edge " +#~ "Client" + +#~ msgid "Virtual Client Engine" +#~ msgstr "Engine do Virtual Client" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Virtual" +#~ " Client" + +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" #~ msgstr "" +#~ "Engine do Virtual Client e do Edge" +#~ " Client no mesma carga de trabalho" +#~ " (workload)" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" #~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com ambas engines do" +#~ " Virtual Client e do Edge Client" + +#~ msgid "Clone the flower repository." +#~ msgstr "Clone o repositório do flower." #~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." #~ msgstr "" +#~ "Por favor, siga a primeira seção " +#~ "em :doc:`Execute o Flower usando Docker" +#~ " `" +#~ " que cobre este passo em mais " +#~ "detalhes." -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgid "``22.04``" +#~ msgstr "``23.0.1``" + +#~ msgid "``23.0.1``" +#~ msgstr "``23.0.1``" + +#~ msgid "``69.0.2``" +#~ msgstr "``69.0.2``" + +#~ msgid "``1.8.0``" +#~ msgstr "``1.7.0``" + +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" #~ msgstr "" -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" #~ msgstr "" +#~ "O exemplo a seguir cria uma imagem" +#~ " de servidor com a imagem base " +#~ "oficial do Flower py3.11-ubuntu22.04 e " +#~ "Flower 1.7.0:" + +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Versão da imagem Docker oficial do Ubuntu." 
#~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." #~ msgstr "" -#~ msgid "Get task results." +#~ msgid "**Via the UI**" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." #~ msgstr "" -#~ msgid "Schedule tasks." +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." #~ msgstr "" -#~ msgid "GrpcDriver" +#~ msgid "Click on the **green** ``Run workflow`` button." #~ msgstr "" -#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgid "**Via the GitHub CI**" #~ msgstr "" -#~ msgid "Connect to the Driver API." +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." #~ msgstr "" -#~ msgid "Request for run ID." +#~ msgid "Preliminarities" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" +#~ msgid "Example: JAX - Run JAX Federated" #~ msgstr "" -#~ msgid "Disconnect from the Driver API." +#~ msgid "" +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" +#~ "The following command can be used " +#~ "to verify if Flower was successfully " +#~ "installed. If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" #~ msgstr "" -#~ msgid "Get client IDs." +#~ msgid ":doc:`How to run Flower using Docker `" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" +#~ msgid "Before you start, make sure that the Docker daemon is running:" #~ msgstr "" #~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "running the clients' jobs (i.e. their" -#~ " `fit()` method)." +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." #~ msgstr "" #~ msgid "" -#~ "Much effort went into a completely " -#~ "restructured Flower docs experience. The " -#~ "documentation on [flower.ai/docs](flower.ai/docs) is" -#~ " now divided into Flower Framework, " -#~ "Flower Baselines, Flower Android SDK, " -#~ "Flower iOS SDK, and code example " -#~ "projects." +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." 
#~ msgstr "" #~ msgid "" -#~ "The first preview release of Flower " -#~ "Baselines has arrived! We're kickstarting " -#~ "Flower Baselines with implementations of " -#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," -#~ " and FedAvgM. Check the documentation " -#~ "on how to use [Flower " -#~ "Baselines](https://flower.ai/docs/using-baselines.html). " -#~ "With this first preview release we're" -#~ " also inviting the community to " -#~ "[contribute their own " -#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." #~ msgstr "" -#~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ msgid "Flower SuperLink" #~ msgstr "" -#~ msgid "The following examples are available as standalone projects." +#~ msgid "Quickstart" #~ msgstr "" -#~ msgid "Quickstart TensorFlow/Keras" +#~ msgid "If you're looking to try out Flower, you can use the following command:" #~ msgstr "" #~ msgid "" -#~ "`Quickstart TensorFlow (Tutorial) " -#~ "`_" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." #~ msgstr "" #~ msgid "" -#~ "`Quickstart PyTorch (Tutorial) " -#~ "`_" +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." #~ msgstr "" #~ msgid "" -#~ "`PyTorch: From Centralized To Federated " -#~ "(Tutorial) `_" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" -#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." #~ msgstr "" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" #~ msgstr "" -#~ msgid "Extra Dependencies" +#~ msgid "Mounting a volume to store the state on the host system" #~ msgstr "" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. 
The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." #~ msgstr "" -#~ msgid "For PyTorch examples::" +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " +#~ "state from the file. To start the" +#~ " SuperLink with an empty database, " +#~ "simply remove the ``state.db`` file." #~ msgstr "" -#~ msgid "For TensorFlow examples::" +#~ msgid "Enabling SSL for secure connections" #~ msgstr "" -#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." #~ msgstr "" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." #~ msgstr "" -#~ msgid "PyTorch Examples" +#~ msgid "" +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." +#~ msgstr "" + +#~ msgid "Flower SuperNode" #~ msgstr "" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." 
+#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." #~ msgstr "" -#~ msgid "CIFAR-10 Image Classification" +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." #~ msgstr "" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." #~ msgstr "" +#~ "Antes de começarmos, precisamos encontrar " +#~ "alguns pré-requisitos em nosso ambiente " +#~ "de desenvolvimento local." -#~ msgid "First, start a Flower server:" +#~ msgid "Creating a SuperNode Dockerfile" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "Let's assume the following project layout:" #~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." #~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." #~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." #~ msgstr "" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. 
Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "" +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Construindo a imagem do servidor" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "" +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." #~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." #~ msgstr "" -#~ msgid "TensorFlow Examples" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." #~ msgstr "" -#~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." +#~ msgid "Let's break down each part of this command:" #~ msgstr "" -#~ msgid "Fashion-MNIST Image Classification" +#~ msgid "``docker run``: This is the command to run a new Docker container." #~ msgstr "" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "``--insecure``: This option enables insecure communication." #~ msgstr "" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" #~ msgstr "" -#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." #~ msgstr "" #~ msgid "" -#~ "`Flower meets KOSMoS `_." +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." #~ msgstr "" #~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the full code example: " -#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" -#~ "huggingface](https://github.com/adap/flower/tree/main/examples" -#~ "/quickstart-huggingface)." +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. 
To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" #~ msgstr "" #~ msgid "" -#~ "First of all, for running the " -#~ "Flower Python server, it is recommended" -#~ " to create a virtual environment and" -#~ " run everything within a `virtualenv " -#~ "`_. " -#~ "For the Flower client implementation in" -#~ " iOS, it is recommended to use " -#~ "Xcode as our IDE." +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." #~ msgstr "" #~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this informations beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." #~ msgstr "" -#~ msgid "" -#~ "After we have all of the necessary" -#~ " informations, let's create our Flower " -#~ "client." +#~ msgid "Flower ServerApp" #~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." #~ msgstr "" #~ msgid "" -#~ "It is recommended to create a " -#~ "virtual environment and run everything " -#~ "within this `virtualenv `_." +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." #~ msgstr "" #~ msgid "" -#~ "First of all, it is recommended to" -#~ " create a virtual environment and run" -#~ " everything within a `virtualenv " -#~ "`_." +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." #~ msgstr "" -#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgid "Creating a ServerApp Dockerfile" #~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML `_, a popular" -#~ " image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The utility :code:`utils.load_mnist()` downloads " -#~ "the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." 
#~ msgstr "" #~ msgid "" -#~ "Now that you have known how " -#~ "federated XGBoost work with Flower, it's" -#~ " time to run some more comprehensive" -#~ " experiments by customising the " -#~ "experimental settings. In the xgboost-" -#~ "comprehensive example (`full code " -#~ "`_), we provide more options " -#~ "to define various experimental setups, " -#~ "including aggregation strategies, data " -#~ "partitioning and centralised/distributed evaluation." -#~ " We also support `Flower simulation " -#~ "`_ making it easy to " -#~ "simulate large client cohorts in a " -#~ "resource-aware manner. Let's take a " -#~ "look!" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." #~ msgstr "" -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." #~ msgstr "" -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "" +#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." #~ msgstr "" -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." #~ msgstr "" -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "Now that we have built the ServerApp image, we can finally run it." #~ msgstr "" -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." #~ msgstr "" -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." #~ msgstr "" -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" #~ msgstr "" -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." #~ msgstr "" -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. 
This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." #~ msgstr "" -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgid "Advanced Docker options" #~ msgstr "" -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgid "Run with root user privileges" #~ msgstr "" -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgid "" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." #~ msgstr "" -#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgid "**Run a container with root user privileges**" #~ msgstr "" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "``BASE_IMAGE_TAG``" +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" -#~ msgid "The image tag of the base image." -#~ msgstr "A tag da imagem da imagem base." +#~ msgid "Using a different Flower version" +#~ msgstr "" -#~ msgid "" -#~ "Open the notebook ``doc/source/tutorial-" -#~ "get-started-with-flower-pytorch.ipynb``:" +#~ msgid "Pinning a Docker image to a specific version" #~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -#~ "/tutorial-get-started-with-flower-" -#~ "pytorch.ipynb" +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." #~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" -#~ "name/doc/source/tutorial-get-started-with-" -#~ "flower-pytorch.ipynb" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" #~ msgstr "" -#~ msgid "Virutualenv with Pyenv/Virtualenv" +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" #~ msgstr "" -#~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ msgid "Setting environment variables" #~ msgstr "" -#~ msgid "Open a PR (as shown above)" +#~ msgid "" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." #~ msgstr "" -#~ msgid "How to write a good PR title" +#~ msgid "" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." #~ msgstr "" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. 
Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and poetentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." #~ msgstr "" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`scikit-" +#~ "learn ` | " +#~ ":doc:`XGBoost ` |" +#~ " :doc:`Android ` " +#~ "| :doc:`iOS `" #~ msgstr "" -#~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ msgid "flower-client-app" #~ msgstr "" -#~ msgid "Implement Algorithm" +#~ msgid ":py:obj:`flwr.client `\\" #~ msgstr "" -#~ msgid "Database" +#~ msgid ":py:obj:`flwr.common `\\" #~ msgstr "" -#~ msgid "Add my_new_file.py to codebase" +#~ msgid ":py:obj:`flwr.server `\\" #~ msgstr "" -#~ msgid "Improve code in module" +#~ msgid ":py:obj:`flwr.simulation `\\" #~ msgstr "" -#~ msgid "Change SomeModule" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ msgid "Run Flower client app." #~ msgstr "" -#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" #~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgid "Run Flower SuperNode." #~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgid ":py:obj:`flwr.client.mod `\\" #~ msgstr "" -#~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" +#~ msgid ":py:obj:`Context `\\ \\(state\\)" #~ msgstr "" -#~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ msgid "State of your run." +#~ msgstr "" + +#~ msgid "Metrics record." +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" #~ msgstr "" -#~ msgid "Changelog entry" +#~ msgid "Remove all items from R." 
#~ msgstr "" -#~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ msgid "d defaults to None." #~ msgstr "" -#~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ msgid "Update R from dict/iterable E and F." #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid " is to not add the PR to the changelog" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" #~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid ":py:obj:`partition_id `\\" #~ msgstr "" -#~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" +#~ msgid "An identifier telling which data partition a ClientApp should use." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. 
" +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ msgid "Run Flower server app." #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." #~ msgstr "" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" #~ msgstr "" -#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgid ":py:obj:`flwr.server.strategy `\\" #~ msgstr "" -#~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ msgid ":py:obj:`flwr.server.workflow `\\" #~ msgstr "" -#~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgid "run\\_driver\\_api" #~ msgstr "" -#~ msgid "MNIST Training with MXNet" +#~ msgid "run\\_fleet\\_api" #~ msgstr "" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ msgid "key shares." 
#~ msgstr "" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" #~ msgstr "" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." #~ msgstr "" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." #~ msgstr "" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." -#~ msgstr "" - -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." #~ msgstr "" -#~ msgid "MXNet meets Flower" +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." #~ msgstr "" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. 
Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." #~ msgstr "" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." #~ msgstr "" #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" #~ msgstr "" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." 
+#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" #~ msgstr "" -#~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ msgid "In addition, we define the device allocation in PyTorch with:" #~ msgstr "" #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." +#~ msgstr "" + +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." #~ msgstr "" #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." #~ msgstr "" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." #~ msgstr "" #~ msgid "" -#~ "This guide describes how to a " -#~ "SSL-enabled secure Flower server can " -#~ "be started and how a Flower client" -#~ " can establish a secure connections " -#~ "to it." +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" #~ msgstr "" #~ msgid "" -#~ "The code example comes with a " -#~ "README.md file which will explain how" -#~ " to start it. Although it is " -#~ "already SSL-enabled, it might be " -#~ "less descriptive on how. Stick to " -#~ "this guide for a deeper introduction " -#~ "to the topic." +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." #~ msgstr "" #~ msgid "" -#~ "Using SSL-enabled connections requires " -#~ "certificates to be passed to the " -#~ "server and client. For the purpose " -#~ "of this guide we are going to " -#~ "generate self-signed certificates. 
As " -#~ "this can become quite complex we " -#~ "are going to ask you to run " -#~ "the script in :code:`examples/advanced-" -#~ "tensorflow/certificates/generate.sh`" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ msgid "with the following command sequence:" +#~ msgid "receive the updated local model weights" #~ msgstr "" -#~ msgid "" -#~ "The approach how the SSL certificates" -#~ " are generated in this example can" -#~ " serve as an inspiration and starting" -#~ " point but should not be taken " -#~ "as complete for production environments. " -#~ "Please refer to other sources regarding" -#~ " the issue of correctly generating " -#~ "certificates for production environments." +#~ msgid "which can be implemented in the following way:" #~ msgstr "" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." #~ msgstr "" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" #~ msgstr "" #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a client which uses the" -#~ " previously generated scripts:" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" #~ msgstr "" #~ msgid "" -#~ "When setting :code:`root_certificates`, the " -#~ "client expects the PEM-encoded root " -#~ "certificates as a byte string. We " -#~ "are again using :code:`Path` to simplify" -#~ " reading those as byte strings." +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." 
#~ msgstr "" #~ msgid "" -#~ "You should now have learned how to" -#~ " generate self-signed certificates using" -#~ " the given script, start a SSL-" -#~ "enabled server, and have a client " -#~ "establish a secure connection to it." +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." #~ msgstr "" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "Flower server" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." #~ msgstr "" #~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." -#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." #~ msgstr "" #~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. " -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." #~ msgstr "" #~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower server. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." 
#~ msgstr "" -#~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." +#~ msgid "Let's build a new ``Strategy`` from scratch!" #~ msgstr "" #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the server " -#~ "supports:" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap everything in their own " +#~ "``DataLoader``. We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the server on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" #~ msgstr "" #~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " -#~ "state from the file. To start the" -#~ " server with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" #~ msgstr "" #~ msgid "" -#~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." +#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. " +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" #~ msgstr "" #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. " -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "We've seen this before, there's nothing" +#~ " new so far. The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. 
Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." #~ msgstr "" -#~ msgid "Using a different Flower or Python version" +#~ msgid "" +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ "`Check out Flower Code Examples " +#~ "`__" #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." #~ msgstr "" -#~ msgid "" -#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " -#~ "` | :doc:`🤗 " -#~ "Transformers ` " -#~ "| :doc:`JAX ` |" -#~ " :doc:`Pandas ` " -#~ "| :doc:`fastai `" -#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " -#~ "` | :doc" -#~ ":`scikit-learn `" -#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +#~ msgid "Loading the data" #~ msgstr "" -#~ msgid "flower-driver-api" +#~ msgid "" +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." #~ msgstr "" -#~ msgid "flower-fleet-api" +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. 
We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." #~ msgstr "" -#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgid "Defining the model" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgid "Training the model" #~ msgstr "" -#~ msgid "Start a Flower Driver API server." +#~ msgid "" +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" #~ msgstr "" -#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgid "Updating model parameters" #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. 
It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." #~ msgstr "" -#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgid "" +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" #~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." +#~ msgid "Implementing a Flower client" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." #~ msgstr "" #~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" -#~ msgid "start\\_driver" +#~ msgid "" +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" #~ msgstr "" -#~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ msgid "" +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. " +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." 
+#~ msgstr "" + +#~ msgid "Using the Virtual Client Engine" #~ msgstr "" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." +#~ msgstr "" + +#~ msgid "" +#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" #~ msgstr "" -#~ msgid "The Driver object to use." +#~ msgid "Starting the training" #~ msgstr "" -#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgid "" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." #~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. 
The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" +#~ msgstr "" + +#~ msgid "" +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "" +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." #~ msgstr "" -#~ msgid "Quickstart MXNet" +#~ msgid "Let's move beyond FedAvg with Flower strategies!" #~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" #~ msgstr "" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." #~ msgstr "" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgid "" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." #~ msgstr "" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." 
+#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" #~ msgstr "" #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" #~ msgstr "" -#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" #~ msgstr "" -#~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" #~ msgstr "" -#~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" #~ msgstr "" -#~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." +#~ msgid "|3047bbce54b34099ae559963d0420d79|" #~ msgstr "" -#~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" #~ msgstr "" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" #~ msgstr "" -#~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" #~ msgstr "" -#~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" #~ msgstr "" -#~ msgid "They can be implemented in the following way:" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" #~ msgstr "" -#~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ msgid "|032eb6fed6924ac387b9f13854919196|" #~ msgstr "" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. 
The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" #~ msgstr "" -#~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" #~ msgstr "" -#~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" -#~ msgid ":code:`load_mnist()`" +#~ msgid ":py:obj:`client `\\" #~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgid ":py:obj:`common `\\" #~ msgstr "" -#~ msgid ":code:`shuffle()`" +#~ msgid ":py:obj:`server `\\" #~ msgstr "" -#~ msgid "Shuffles data and its label" +#~ msgid ":py:obj:`simulation `\\" #~ msgstr "" -#~ msgid ":code:`partition()`" +#~ msgid ":py:obj:`mod `\\" #~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" +#~ msgid "run\\_client\\_app" #~ msgstr "" -#~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ msgid "run\\_supernode" #~ msgstr "" #~ msgid "" -#~ "The number of federated learning rounds" -#~ " is set in :code:`fit_round()` and " -#~ "the evaluation is defined in " -#~ ":code:`get_evaluate_fn()`. The evaluation function" -#~ " is called after each federated " -#~ "learning round and gives you information" -#~ " about loss and accuracy." +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "Let's get stated!" +#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" #~ msgid "" -#~ "We now have a list of ten " -#~ "training sets and ten validation sets" -#~ " (``trainloaders`` and ``valloaders``) " -#~ "representing the data of ten different" -#~ " organizations. Each ``trainloader``/``valloader`` " -#~ "pair contains 4500 training examples and" -#~ " 500 validation examples. There's also " -#~ "a single ``testloader`` (we did not " -#~ "split the test set). Again, this " -#~ "is only necessary for building research" -#~ " or educational systems, actual federated" -#~ " learning systems have their data " -#~ "naturally distributed across multiple " -#~ "partitions." +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. 
Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "the string key as the query for the layout." #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "Corresponding layout based on the query." #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid ":py:obj:`strategy `\\" #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid ":py:obj:`workflow `\\" #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "run\\_server\\_app" #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "run\\_superlink" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "Start a Ray-based Flower simulation server." #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "The total number of clients in this simulation." #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." 
#~ msgstr "" -#~ "Atualmente, Flower fornece duas imagens, " -#~ "uma imagem base e uma imagem de" -#~ " servidor. Também haverá uma imagem " -#~ "de cliente em breve. A imagem " -#~ "base, como o nome sugere, contém " -#~ "dependências básicas que tanto o " -#~ "servidor quanto o cliente precisam. Isso" -#~ " inclui dependências do sistema, Python " -#~ "e ferramentas Python. A imagem do " -#~ "servidor é baseada na imagem base, " -#~ "mas também instala o servidor Flower " -#~ "usando ``pip```." - -#~ msgid "``3.11``" -#~ msgstr "``3.11``" -#~ msgid "Defaults to ``22.04``." -#~ msgstr "Como padrão ``22.04``." +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" -#~ msgid "Building the SuperLink image" -#~ msgstr "Construindo a imagem do servidor" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "Pré-definido para ``flwr/server``." +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" -#~ msgid "The Python version of the base image." -#~ msgstr "O nome do repositório da imagem base." +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "Como padrão ``22.04``." +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "Pré-definido para ``py3.11-ubuntu22.04``." +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" -#~ msgid "The PyPI package to install." +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." #~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "Pré-definido para ``flwr/server``." +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." #~ msgstr "" -#~ "O nome da imagem é ``flwr_server`` " -#~ "e a tag ``0.1.0``. Lembre-se que" -#~ " os argumentos de compilação, bem " -#~ "como o nome e a tag podem " -#~ "ser adaptados às suas necessidades. 
" -#~ "Esses valores servem apenas como " -#~ "exemplos." -#~ msgid "Creating New Messages" -#~ msgstr "Criando novas mensagens" +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" #~ msgstr "" #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" -#~ msgid "Server's side:" +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." #~ msgstr "" -#~ msgid "Client's side:" +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" #~ msgstr "" #~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." #~ msgstr "" -#~ msgid "Message Types for Protocol Buffers" +#~ msgid "Dependencies" #~ msgstr "" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" #~ msgstr "" -#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgid "Standard Hugging Face workflow" #~ msgstr "" -#~ msgid "Within the ClientMessage block:" +#~ msgid "Handling the data" #~ msgstr "" #~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. 
We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" -#~ msgid "Once that is done, we will compile the file with:" +#~ msgid "Training and testing the model" #~ msgstr "" -#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ msgid "Serialization and Deserialization Functions" +#~ msgid "Creating the model itself" #~ msgstr "" #~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" -#~ msgid "The four functions:" +#~ msgid "Federating the example" #~ msgstr "" -#~ msgid "Sending the Message from the Server" +#~ msgid "Creating the IMDBClient" #~ msgstr "" #~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" #~ msgstr "" -#~ msgid "Receiving the Message by the Client" +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" + +#~ msgid "Starting the server" #~ msgstr "" #~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" #~ msgstr "" -#~ msgid "Within the handle function:" +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." #~ msgstr "" -#~ msgid "And add a new function:" +#~ msgid "Putting everything together" #~ msgstr "" -#~ msgid "Hopefully, when you run your program you will get the intended result!" 
+#~ msgid "We can now start client instances using:" #~ msgstr "" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__." +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``--volume``" -#~ " to mount the user's home directory" -#~ " (``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" #~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the user's home directory on your " -#~ "host system. If the file already " -#~ "exists, the SuperLink tries to restore" -#~ " the state from the file. To " -#~ "start the SuperLink with an empty " -#~ "database, simply remove the ``state.db`` " -#~ "file." +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the SuperLink to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the SuperLink with the" -#~ " ``--certificates`` flag." +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." #~ msgstr "" #~ msgid "" -#~ "``--server 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" #~ msgstr "" #~ msgid "" -#~ "``--server 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" #~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." 
+#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" + +#~ msgid "Before Flower can be imported we have to install it:" #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower, for example Flower" -#~ " nightly, you can do so by " -#~ "changing the tag. All available versions" -#~ " are on `Docker Hub " -#~ "`__." +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" #~ msgstr "" #~ msgid "" -#~ "Here's another example to start with " -#~ "HTTPS. Use the ``--certificates`` command " -#~ "line argument to pass paths to (CA" -#~ " certificate, server certificate, and " -#~ "server private key)." +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." #~ msgstr "" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" #~ msgstr "" -#~ msgid "Run Flower server (Driver API)." +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." #~ msgstr "" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" -#~ msgid "Run Flower server (Fleet API)." +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" -#~ msgid "Unreleased" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "Each client will have its own dataset." 
#~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" #~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" #~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" #~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" #~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" #~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" #~ msgstr "" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index e9279db19043..9af452fb0be2 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" "PO-Revision-Date: 2024-06-12 10:09+0000\n" "Last-Translator: Yan Gao \n" "Language: zh_Hans\n" @@ -17,44 +17,199 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Flower的架构" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 +msgid "" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. 
Private APIs can change at any time, even in patch " +"releases." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +#, fuzzy +msgid "Flower public API" +msgstr "Flower 客户端。" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "边缘客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" -msgstr "具有边缘客户端引擎的`Flower `核心架构" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "虚拟客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:100 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" -msgstr "具有虚拟客户端引擎的`Flower `核心架构" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" -msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." +msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "如何在本地搭建Docker Flower images" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -81,26 +236,15 @@ msgstr "在开始之前,我们需要在本地开发环境中满足一些先决 #: ../../source/contributor-how-to-build-docker-images.rst:12 #, fuzzy -msgid "Clone the flower repository." +msgid "Clone the ``flower`` repository." msgstr "**叉花仓库**" #: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 #, fuzzy msgid "Verify the Docker daemon is running." msgstr "验证 Docker 守护进程是否正在运行。" #: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -#, fuzzy -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" -"请阅读 :doc:`Run Flower using Docker ` " -"的第一节,其中更详细地介绍了这一步骤。" - -#: ../../source/contributor-how-to-build-docker-images.rst:25 #, fuzzy msgid "" "The build instructions that assemble the images are located in the " @@ -108,7 +252,7 @@ msgid "" "``src/docker``." msgstr "组装镜像的构建说明位于各自的 Dockerfile 中。你可以在 ``src/docker`` 的子目录中找到它们。" -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:23 #, fuzzy msgid "" "Flower Docker images are configured via build arguments. 
Through build " @@ -123,172 +267,194 @@ msgstr "" "``PYTHON_VERSION`` 联编参数指定要安装的 Python " "版本。有些联编参数有默认值,有些则必须在联编映像时指定。每个映像的所有可用联编参数都列在下表中。" -#: ../../source/contributor-how-to-build-docker-images.rst:35 +#: ../../source/contributor-how-to-build-docker-images.rst:30 #, fuzzy -msgid "Building the base image" +msgid "Building the Base Image" msgstr "加载数据" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:36 #: ../../source/contributor-how-to-build-docker-images.rst:98 #, fuzzy msgid "Build argument" msgstr "构建文档" -#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:37 #: ../../source/contributor-how-to-build-docker-images.rst:99 #, fuzzy msgid "Description" msgstr "停用" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:38 #: ../../source/contributor-how-to-build-docker-images.rst:100 #, fuzzy msgid "Required" msgstr "所需变更" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:39 #: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/docker/persist-superlink-state.rst:18 +#: ../../source/docker/pin-version.rst:11 +#: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "实例" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:40 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:41 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "基础镜像的存储库名称。" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 +#: ../../source/contributor-how-to-build-docker-images.rst:42 +#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "No" msgstr "现在" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "``ubuntu``" msgstr "``UBUNTU_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:44 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "Version of the Linux distribution." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 -#, fuzzy -msgid "``22.04``" -msgstr "``1.0.0rc1``" +#: ../../source/contributor-how-to-build-docker-images.rst:47 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:48 #, fuzzy msgid "``PYTHON_VERSION``" msgstr "Python 版本" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:49 #, fuzzy msgid "Version of ``python`` to be installed." msgstr "要安装的 ``python`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:52 #, fuzzy msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:53 #, fuzzy msgid "Version of ``pip`` to be installed." msgstr "要安装的 ``pip` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:62 #: ../../source/contributor-how-to-build-docker-images.rst:108 #, fuzzy msgid "Yes" msgstr "类型" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#, fuzzy -msgid "``23.0.1``" -msgstr "``1.0.0rc1``" +#: ../../source/contributor-how-to-build-docker-images.rst:55 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:56 #, fuzzy msgid "``SETUPTOOLS_VERSION``" msgstr "设置工具版本" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:57 #, fuzzy msgid "Version of ``setuptools`` to be installed." msgstr "要安装的 `setuptools`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:59 #, fuzzy -msgid "``69.0.2``" -msgstr "``1.0.0b0``" +msgid ":substitution-code:`|setuptools_version|`" +msgstr "设置工具版本" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:60 #, fuzzy msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:61 #, fuzzy msgid "Version of Flower to be installed." msgstr "要安装的 Flower 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:68 -#, fuzzy -msgid "``1.8.0``" -msgstr "``1.0.0b0``" +#: ../../source/contributor-how-to-build-docker-images.rst:63 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:64 #, fuzzy msgid "``FLWR_PACKAGE``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:65 #, fuzzy msgid "The Flower package to be installed." 
msgstr "要安装的 PyPI 软件包。" -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:69 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +#, fuzzy +msgid "`Direct Reference Examples`_" +msgstr "示例请求" + +#: ../../source/contributor-how-to-build-docker-images.rst:73 #, fuzzy msgid "" "The following example creates a base Ubuntu/Alpine image with Python " -"3.11.0, pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "下面的示例使用 Python 3.11.0、pip 23.0.1 和 setuptools 69.0.2 创建了基本映像:" #: ../../source/contributor-how-to-build-docker-images.rst:88 #, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "图像名称为 ``flwr_base``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" #: ../../source/contributor-how-to-build-docker-images.rst:92 #, fuzzy -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "启动服务器" +msgid "Building a Flower Binary Image" +msgstr "加载数据" #: ../../source/contributor-how-to-build-docker-images.rst:102 #, fuzzy @@ -316,32 +482,36 @@ msgid "The Tag of the Flower base image." msgstr "基础镜像的存储库名称。" #: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:111 -#, fuzzy msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image " -"with the official Flower base image:" -msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:121 #, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " "base image, all you need to do is set the ``BASE_REPOSITORY`` build " -"argument." +"argument to ``flwr_base`` (as we've specified above)." 
msgstr "" "如果您想使用自己的基础图片而不是 Flower 官方的基础图片,只需设置 ``BASE_REPOSITORY`` 和 " "``BASE_IMAGE_TAG`` " "联编参数即可。`BASE_REPOSITORY``的值必须与您的图像名称一致,`BASE_IMAGE_TAG``的值必须与您的图像标签一致。" -#: ../../source/contributor-how-to-build-docker-images.rst:133 +#: ../../source/contributor-how-to-build-docker-images.rst:132 #, fuzzy msgid "After creating the image, we can test whether the image is working:" msgstr "创建图像后,我们可以测试图像是否正常工作:" +#: ../../source/contributor-how-to-build-docker-images.rst:139 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "示例请求" + #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" msgstr "贡献译文" @@ -688,8 +858,8 @@ msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (不含额 #: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" "``pip install " "'flwr[simulation]@git+https://github.com/adap/flower.git'``(带附加功能)" @@ -710,7 +880,9 @@ msgstr "" msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" -msgstr "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@分支名'``(带附加功能)" +msgstr "" +"``pip install " +"'flwr[simulation]@git+https://github.com/adap/flower.git@分支名'``(带附加功能)" #: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Open Jupyter Notebooks on Google Colab" @@ -836,139 +1008,89 @@ msgstr "" msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "检查 GitHub 上的发布稿,如果一切正常,就发布它。" -#: ../../source/contributor-how-to-release-flower.rst:15 -#, fuzzy -msgid "Trigger the CI for building the Docker images." -msgstr "官方 Ubuntu Docker 映像的版本。" - #: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a " -"``workflow_dispatch`` event in the GitHub CI. This can be done either " -"through the UI or via the GitHub CLI. The event requires only one input, " -"the Flower version, to be released." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:21 -#, fuzzy -msgid "**Via the UI**" -msgstr "**审查 PR**" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page " -"`_." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower " -"in the ``Version of Flower`` input field." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." 
-msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "发布后" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:19 msgid "Create a pull request which contains the following changes:" msgstr "创建包含以下更改的拉取请求:" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:21 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "Update all files which contain the current version number if necessary." msgstr "如有必要,更新包含当前版本号的所有文件。" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:23 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:25 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:28 msgid "Publishing a pre-release" msgstr "发布预发布版本" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Pre-release naming" msgstr "释放前命名" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" " MUST use one of the following naming patterns:" msgstr "PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下命名模式之一:" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "阿尔法 ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:36 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "贝塔: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "版本代号 (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:39 msgid "Examples include:" msgstr "例子包括:" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:42 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:43 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "这符合 PEP-440 和 
Python 包装管理局 (PyPA) 的建议:" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: ../../source/contributor-how-to-release-flower.rst:52 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -988,26 +1110,26 @@ msgstr "" "规范不兼容,详情请查阅《语义版本规范》`_(特别是关于优先级的第 11 项)。" -#: ../../source/contributor-how-to-release-flower.rst:73 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "Pre-release classification" msgstr "发布前分类" -#: ../../source/contributor-how-to-release-flower.rst:75 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "下一个预发布版应该叫阿尔法版、贝塔版还是候选发布版?" -#: ../../source/contributor-how-to-release-flower.rst:77 +#: ../../source/contributor-how-to-release-flower.rst:59 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "RC:功能完整,无已知问题(除了下一个稳定版中被列为 \"不会修复 \"的问题)--如果没有问题出现,这将成为下一个稳定版" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "Beta: feature complete, allowed to have known issues" msgstr "贝塔版:功能完整,允许存在已知问题" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:61 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "阿尔法版:功能不完整,允许存在已知问题" @@ -1032,11 +1154,11 @@ msgstr "Python 版本" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 #: ../../source/how-to-install-flower.rst:8 msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." msgstr "" -"Flower 至少需要 `Python 3.8 `_,但建议使用 `Python " +"Flower 至少需要 `Python 3.9 `_,但建议使用 `Python " "3.10 `_或更高版本。" #: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 @@ -1933,10 +2055,10 @@ msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" 
#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "接下来的步骤" @@ -2056,14 +2178,17 @@ msgid "Get started as a contributor" msgstr "成为贡献者" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "先决条件" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 #, fuzzy -msgid "`Python 3.8 `_ or above" -msgstr "Python 3.7 `_ 或更高版本" +msgid "`Python 3.9 `_ or above" +msgstr "Python 3.9 `_ 或更高版本" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2093,7 +2218,7 @@ msgstr "开发者机器设置" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 #, fuzzy -msgid "Preliminarities" +msgid "Preliminaries" msgstr "前言" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -2161,20 +2286,20 @@ msgstr "" msgid "" "If you don't have :code:`pyenv` installed, the following script that will" " install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +":code:`Python 3.9.20` by default)::" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " +"3.9.20)::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 #, fuzzy msgid "" "If you already have :code:`pyenv` installed (along with the :code:`pyenv-" "virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +":code:`Python 3.9.20` by default)::" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " +"3.9.20)::" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 #, fuzzy @@ -2321,18665 +2446,17157 @@ msgstr "" msgid "This will generate HTML documentation in ``doc/build/html``." 
msgstr "这将在 ``doc/build/html`` 中生成 HTML 文档。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:2 #, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "启用 SSL 连接" + +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " -"从集中式到联邦式 `_ 做少量改动。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "集中式训练" +#: ../../source/docker/enable-tls.rst:7 +#, fuzzy +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." +msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:12 #, fuzzy msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" -msgstr "" -"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " -"的文件,修改部分如下所示:" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." +msgstr "" +"出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ 页面中有一个部分将指导你完成这一过程。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." -msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" -msgstr "现在,您可以运行您的机器学习工作了:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." 
+msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:14 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." +msgstr "" + +#: ../../source/docker/enable-tls.rst:27 #, fuzzy +msgid "SuperLink" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:29 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " -"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "联邦培训" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "Understanding the command" +msgstr "训练模型" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:213 +#: ../../source/docker/tutorial-quickstart-docker.rst:300 #, fuzzy -msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +msgid "``docker run``: This tells Docker to run a container from an image." +msgstr "`docker run``: 这是运行新 Docker 容器的命令。" + +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:214 +#: ../../source/docker/tutorial-quickstart-docker.rst:301 +msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" -"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " -":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " -"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." 
-msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " -":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " -"normalization层的参数。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "现在,您可以打开另外两个终端窗口并运行程序" +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" +"This allows the container to access the TLS certificates that are stored " +"in the certificates" +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "下一步工作" +#: ../../source/docker/enable-tls.rst +msgid "directory." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"本示例的完整源代码可在 `_ " -"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " -"CIFAR-10 子集,或者增加客户端的数量。" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "示例: JAX - 运行联邦式 JAX" - -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. 
Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " -"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " -"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " -"`_" -" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " -"和 :code:`flwr`:" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "使用 JAX 进行线性回归" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " -"`JAX 文档 `_。" -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " -"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " -":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " -"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." 
-msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." -msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " -":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " -"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst +msgid "the network." +msgstr "" + +#: ../../source/docker/enable-tls.rst:71 +#, fuzzy +msgid "SuperNode" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:73 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." -msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." +msgstr "" +"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " +"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst:78 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." 
msgstr "" -"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " -"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " -":code:`train()`。" -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." -msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX 结合 Flower" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " -":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." 
+"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " -":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "我们已经可以启动*服务器*了:" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" +msgstr "" + +#: ../../source/docker/enable-tls.rst:107 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " -":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " -":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " -"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " -":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (可选)`" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "在本地模型上设置从服务器接收的模型参数" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "将参数转换为 NumPy :code:`ndarray`格式" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" -msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " -":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/index.rst:2 +#, fuzzy +msgid "Run Flower using Docker" +msgstr "使用 Docker 运行 Flower" + +#: ../../source/docker/index.rst:4 msgid "" -"update the parameters of the local model with the parameters received " -"from the server" -msgstr "用从服务器接收到的参数更新本地模型的参数" +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" -msgstr "在本地训练集上训练模型" +#: ../../source/docker/index.rst:7 +msgid "" +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" -msgstr "获取更新后的本地模型参数并返回服务器" +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "开始" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`evaluate`" +#: ../../source/docker/index.rst:20 +msgid "Running in Production" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "在本地测试集上评估更新后的模型" +#: ../../source/docker/index.rst:29 +#, fuzzy +msgid "Advanced Options" +msgstr "高级安装选项" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "向服务器返回本地损失值" +#: ../../source/docker/index.rst:41 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "使用 Docker 运行 Flower" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 -msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " -"`NumPyClient` 兼容。" -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/persist-superlink-state.rst:4 +#, fuzzy msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. 
So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " -":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" - -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "定义了联邦进程后,我们就可以运行它了。" +"默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` " +"时,状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件中。" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" -msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" +#: ../../source/docker/persist-superlink-state.rst:7 +msgid "" +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/persist-superlink-state.rst:10 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" -msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/persist-superlink-state.rst:20 +#, fuzzy msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." msgstr "" -"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " -"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" +"如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉" +" Docker 将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 ``--database``" +" 来指定数据库文件的名称。" -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/persist-superlink-state.rst:35 +#, fuzzy msgid "" -"You're now prepared to explore this topic further. 
How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" -msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." +msgstr "" +"服务器一启动,就会在主机系统的用户主目录下创建文件 " +"``state.db``。如果该文件已经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 ``state.db`` 文件即可。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "实例: PyTorch - 从集中式到联邦式" +#: ../../source/docker/pin-version.rst:2 +#, fuzzy +msgid "Pin a Docker Image to a Specific Version" +msgstr "将 Docker 映像固定到特定版本" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/pin-version.rst:4 +#, fuzzy msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " -"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" +"我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 Flower " +"的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不是标签。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/docker/pin-version.rst:13 +#, fuzzy msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" +msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" + +#: ../../source/docker/pin-version.rst:22 +msgid "This will output" msgstr "" -"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " -"`_。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 -msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." 
+#: ../../source/docker/pin-version.rst:29 +#, fuzzy +msgid "Next, we can pin the digest when running a new SuperLink container:" +msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" + +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" msgstr "" -"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " -"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " -":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/run-as-root-user.rst:4 msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " -"中定义。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -":code:`load_data()` 函数加载 CIFAR-10 " -"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 -msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." -msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." -msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." 
+msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/run-as-root-user.rst:29 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "创建超级节点 Dockerfile" + +#: ../../source/docker/run-as-subprocess.rst:2 +#, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "运行分类器和测试" + +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -"到目前为止,如果你以前用过 " -"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." -msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/run-as-subprocess.rst:16 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Flower 服务器" + +#: ../../source/docker/run-as-subprocess.rst:30 +#, fuzzy msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ClientApp 代码所在的目录下运行以下命令,构建 SuperNode Docker 映像。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." 
+"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "快速入门 iOS" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " -"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " -"PyTorch 模型的参数:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." 
msgstr "" -"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" -" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " -":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +#, fuzzy +msgid "Before you start, make sure that:" +msgstr "开始之前,请确保 Docker 守护进程正在运行:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "获取更新后的本地模型参数并发送回服务器" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "向服务器返回本地损失值和精确度" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "验证 Docker 守护进程是否正在运行。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 -msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." msgstr "" -"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " -":code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. 
Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" -msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "示例请求" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " -"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" -" CIFAR-10 子集会如何?增加更多客户端会如何?" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "差分隐私" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "运行以下命令激活 virtualenv:" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 #, fuzzy msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." -msgstr "医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 #, fuzzy +msgid "pyproject.toml" +msgstr "或 ``pyproject.toml```:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." 
-msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -"试想一下,两个数据集除了一条记录(例如 Alice " -"的数据)之外完全相同。差分隐私(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结果(O 和 O' " -"将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信息隐藏在人群中。" -#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 #, fuzzy -msgid "DP Intro" -msgstr "DP 介绍" +msgid "Run the example:" +msgstr "将示例联邦化" -#: ../../source/explanation-differential-privacy.rst:22 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." -msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的贡献,同时保持分析的整体准确性。" +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 -#, fuzzy -msgid "Formal Definition" -msgstr "编译 ProtoBuf 定义" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:26 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. 
A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -"差分隐私(Differential " -"Privacy,DP)针对对手通过随机算法的输出所能推断出的信息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上限[1]。如果任意两个相邻的数据库D" -" :sub:`1`和D :sub:`2`只有一条记录不同,并且对于所有可能的输出S ⊆ " -"Range(A),随机化机制M提供(:math:`epsilon`,:math:`\\delta`)差异隐私:" -#: ../../source/explanation-differential-privacy.rst:32 -#, fuzzy -msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." msgstr "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 #, fuzzy -msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." -msgstr "" -":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`\\epsilon` " -"值表示较高的隐私级别,但也可能降低效用。:math:`\\delta`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最大变化。" +msgid "Limitations" +msgstr "运行模拟" -#: ../../source/explanation-differential-privacy.rst:45 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 #, fuzzy -msgid "Differential Privacy in Machine Learning" -msgstr "差分隐私" +msgid "Quickstart Example" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 #, fuzzy -msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." 
-msgstr "" -"机器学习中可以利用 DP " -"来保护训练数据的隐私。差分保密机器学习算法的设计方式是防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引入噪声的阶段,有多种方法可将" -" DP " -"应用于机器学习算法。一种方法是在训练数据(特征或标签)中添加噪声,另一种方法是在模型训练过程中向损失函数的梯度注入噪声。此外,这种噪声还可以被纳入模型的输出中。" +msgid "quickstart-fastai" +msgstr "快速入门 fastai" -#: ../../source/explanation-differential-privacy.rst:53 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "无" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy -msgid "Differential Privacy in Federated Learning" -msgstr "扩大联邦学习的规模" +msgid "quickstart-huggingface" +msgstr "快速入门教程" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy -msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." -msgstr "联合学习是一种数据最小化方法,允许多方在不共享原始数据的情况下合作训练一个模型。然而,联合学习也带来了新的隐私挑战。各方与中央服务器之间的模型更新可能会泄露本地数据信息。这些泄漏信息可能会被攻击利用,如成员推断攻击、属性推断攻击或模型反转攻击。" +msgid "quickstart-jax" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 #, fuzzy msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." -msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." +msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 #, fuzzy +msgid "quickstart-mlx" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +"`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " -"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy -msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." -msgstr "**中央差分隐私**: DP 由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" +msgid "quickstart-monai" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 #, fuzzy -msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." -msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" +msgid "quickstart-pandas" +msgstr "快速入门Pandas" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 #, fuzzy -msgid "Central Differential Privacy" -msgstr "差分隐私" +msgid "quickstart-pytorch-lightning" +msgstr "快速入门 PyTorch Lightning" -#: ../../source/explanation-differential-privacy.rst:69 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." -msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要注意的是,这需要对服务器的信任。" +"Requires an older pip version that is not supported by the Flower Docker " +"images." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 #, fuzzy -msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." 
-msgstr "" -"虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常用方法是限制客户机模型更新的" -" `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" +msgid "quickstart-pytorch" +msgstr "PyTorch快速入门" -#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 #, fuzzy -msgid "clipping" -msgstr "剪贴" +msgid "quickstart-sklearn-tabular" +msgstr "scikit-learn快速入门" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 #, fuzzy -msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." -msgstr "" -"然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` " -",其中 σ = ( 噪声规模 * S ) / (采样客户数)`。" +msgid "quickstart-tabnet" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:94 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 #, fuzzy -msgid "Clipping" -msgstr "剪贴" +msgid "quickstart-tensorflow" +msgstr "快速入门 TensorFlow" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." +msgstr "" + +#: ../../source/docker/set-environment-variables.rst:2 #, fuzzy -msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." -msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。" +msgid "Set Environment Variables" +msgstr "设置编码环境" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/docker/set-environment-variables.rst:4 #, fuzzy msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." -msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值的更新都会被剪切回阈值。" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." +msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" -#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/docker/tutorial-quickstart-docker.rst:2 #, fuzzy +msgid "Quickstart with Docker" +msgstr "快速入门 iOS" + +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." -msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮中,会根据更新规范分布的量化值调整削波值。" +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst:7 msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." 
-msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型复杂性等。" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -#, fuzzy -msgid "Local Differential Privacy" -msgstr "差分隐私" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:45 msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." msgstr "" -"在这种方法中,每个客户端都负责执行 DP。本地 DP 避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " -"会降低准确性,但却能更好地保护隐私。" -#: ../../source/explanation-differential-privacy.rst:116 +#: ../../source/docker/tutorial-quickstart-docker.rst:50 #, fuzzy -msgid "In this explainer, we focus on two forms of achieving Local DP:" -msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式:" +msgid "Step 2: Start the SuperLink" +msgstr "然后,我们启动服务器:" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 #, fuzzy -msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +msgid "Open your terminal and run:" +msgstr "打开另一台终端,启动第二个客户端:" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" msgstr "" -"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:math:`\\epsilon`, " -":math:`\\delta`)-DP,考虑到本地模型的灵敏度为 ∆,应用了高斯噪声,噪声尺度为 σ,其中:" -#: ../../source/explanation-differential-privacy.rst:120 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" -#: ../../source/explanation-differential-privacy.rst:125 -#, fuzzy -msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." 
-msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说,在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:215 +#: ../../source/docker/tutorial-quickstart-docker.rst:304 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." -msgstr "请注意,这两种方法提供了不同层次的隐私。" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -#, fuzzy -msgid "**References:**" -msgstr "参考资料" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -#, fuzzy -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." -msgstr "[1] Dwork 等:《差分隐私的算法基础》。" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:216 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." msgstr "" -"McMahan, H. Brendan等. \"Learning differentially private recurrent " -"language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:137 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." -msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:80 #, fuzzy -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +msgid "Step 3: Start the SuperNode" +msgstr "然后,我们启动服务器:" + +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." msgstr "" -"Andrew, Galen等. \"Differentially private learning with adaptive " -"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " -"17455-17466." 
-#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "联邦学习评估" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 -msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." -msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "集中评估" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "内置策略" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" -msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "定制策略" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" msgstr "" -":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " -"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" - -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "联邦评估" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "实现联邦评估" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." 
-msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "配置联邦评估" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" -msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" -":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 " -":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " -":code:`0.0`,联邦评估将被禁用。" -#: ../../source/explanation-federated-evaluation.rst:106 -msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." msgstr "" -":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " -"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" -#: ../../source/explanation-federated-evaluation.rst:107 +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#, fuzzy +msgid "Start the second container:" +msgstr "启动服务器" + +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:144 msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." 
msgstr "" -":code:`min_available_clients`: " -":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " -":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:148 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." -msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "评估训练期间的本地模型更新" +#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "Flower 客户端。" -#: ../../source/explanation-federated-evaluation.rst:137 -msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" -msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "Understand the Dockerfile" +msgstr "创建超级节点 Dockerfile" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "完整代码示例" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " -"Example*(同样的方法也可应用于任何其他框架中): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "FED 模板" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "目录" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"Any subsequent commands that reference a directory will be relative to " +"this directory." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[目录](#table-of-contents)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[总结](#summary)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"from the current working directory into the container's ``/app`` " +"directory." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[动机](#motivation)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[目标](#goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "from the ``pyproject.toml``." +msgstr "或 ``pyproject.toml```:" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[非目标](#non-goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[计划](#proposal)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[缺点](#drawbacks)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[备选方案](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[附录](#appendix)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "总结" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "\\[TODO - 句子 1: 问题概括\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:184 +msgid "" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "\\[TODO - 句子 2: 解决方案概括\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:189 +#, fuzzy +msgid "" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "动机" +#: ../../source/docker/tutorial-quickstart-docker.rst:198 +#, fuzzy +msgid "" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." +msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" -msgstr "\\[TODO\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#, fuzzy +msgid "Start the first ClientApp container:" +msgstr "使用虚拟客户端引擎" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "目标" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "flwr_serverapp:0.0.1``: 要使用的 Docker 映像的名称和标记。" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "非目标" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "提案" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "缺点" +#: ../../source/docker/tutorial-quickstart-docker.rst:222 +msgid "Start the second ClientApp container:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "备选方案" +#: ../../source/docker/tutorial-quickstart-docker.rst:233 +#, fuzzy +msgid "Step 5: Start the SuperExec" +msgstr "然后,我们启动服务器:" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" -msgstr "\\[备选 1\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:235 +#, fuzzy +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." +msgstr "构建和运行 ServerApp 映像的程序与 SuperNode 映像几乎完全相同。" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" -msgstr "\\[备选 2\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +msgid "" +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Flower 改善文档" +#: ../../source/docker/tutorial-quickstart-docker.rst:240 +msgid "" +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[增强文档模版](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst:242 +msgid "Dockerfile.superexec" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[描述数据](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[工作流程](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub 问题](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[谷歌文档](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "为提出更大规模的改动提供一个共同的结构" +#: ../../source/docker/tutorial-quickstart-docker.rst:277 +msgid "" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "确保改动的动机明确" +#: ../../source/docker/tutorial-quickstart-docker.rst:285 +#, fuzzy +msgid "Start the SuperExec container:" +msgstr "启动服务器" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "将项目信息保存在版本控制系统中" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "记录面向用户的具有影响力的改动的动机" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "保留 GitHub 问题,用于跟踪进行中的工作" +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "因此,\"增强文件\"将以下方面结合起来" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "flwr_supernode:0.0.1``: 要使用的 Docker 映像的名称和标记。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "一个功能和效力跟踪文档" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "一个产品需要文档" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "一个设计文档" +#: ../../source/docker/tutorial-quickstart-docker.rst:315 +msgid "Step 6: Run the Quickstart Project" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 -msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." 
-msgstr "该文件是与社区合作逐步创建的。" +#: ../../source/docker/tutorial-quickstart-docker.rst:317 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 -msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +#: ../../source/docker/tutorial-quickstart-docker.rst:326 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " -"request)的抽象概念,以了解和沟通项目即将发生的变更。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 -msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +#: ../../source/docker/tutorial-quickstart-docker.rst:332 +msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " -"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." -msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 -msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." -msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" +#: ../../source/docker/tutorial-quickstart-docker.rst:339 +#, fuzzy +msgid "Step 7: Update the Application" +msgstr "步骤 3:自定义序列化" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker.rst:341 msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " -"\"的工作或使用方式。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 -msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." 
-msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#, fuzzy +msgid "quickstart_docker/task.py" +msgstr "快速入门Pandas" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 -msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." -msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" +#: ../../source/docker/tutorial-quickstart-docker.rst:351 +#, fuzzy +msgid "Stop the current ClientApp containers:" +msgstr "当前客户端属性。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "增强文档模板" +#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "加载数据" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 -msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" +#: ../../source/docker/tutorial-quickstart-docker.rst:363 +msgid "Launch two new ClientApp containers based on the newly built image:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" +#: ../../source/docker/tutorial-quickstart-docker.rst:378 +msgid "Run the updated project:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "标题(与描述数据中的标题相同)" +#: ../../source/docker/tutorial-quickstart-docker.rst:385 +msgid "Step 8: Clean Up" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "目录(如有需要)" +#: ../../source/docker/tutorial-quickstart-docker.rst:387 +msgid "Remove the containers and the bridge network:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "注意事项/限制/警告(可选)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:399 +#, fuzzy +msgid "Where to Go Next" +msgstr "从哪里开始" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "设计细节(可选)" +#: ../../source/docker/tutorial-quickstart-docker.rst:401 +msgid ":doc:`enable-tls`" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "毕业标准" +#: ../../source/docker/tutorial-quickstart-docker.rst:402 +msgid ":doc:`persist-superlink-state`" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "升级/降级策略(如适用)" +#: ../../source/docker/tutorial-quickstart-docker.rst:403 +msgid ":doc:`tutorial-quickstart-docker-compose`" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." 
-msgstr "作为参考,本文件采用上述结构。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +#, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "快速入门 iOS" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "描述数据" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 +msgid "" +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**标题** (必填)用简明语言写出提案的标题。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +msgid "Clone the Docker Compose ``complete`` directory:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." -msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." -msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." -msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Flower 服务器。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." -msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." 
-msgstr "**另见** (可选)与本提案相关的其他提案清单。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**取代**(可选) 这份提案所取代的提案列表。" +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "**被取代者** (可选) 此提案取代的提案列表。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 +msgid "" +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "工作流程" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +#, fuzzy +msgid "``docker compose``: The Docker command to run the Docker Compose tool." +msgstr "`docker run``: 这是运行新 Docker 容器的命令。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." -msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " -"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " -"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -"一旦增强功能通过审核和批准,其状态就会变为 " -"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. 
An Enhancement has " -"the following states:" -msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +msgid "Step 3: Run the Quickstart Project" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." -msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`可实施`: 增强功能已审核通过。" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "`已实施`: 增强功能已实施,不再主动更改。" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 -msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." -msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +msgid "quickstart-compose/pyproject.toml" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`撤回`: 作者已撤回增强功能。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +msgid "Execute the command to run the quickstart example:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "`已替换`: 增强功能已被新的增强功能取代。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." -msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +#, fuzzy +msgid "Step 4: Update the Application" +msgstr "步骤 3:自定义序列化" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "In the next step, change the application code." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." -msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "GitHub 问题" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +msgid "quickstart-compose/quickstart_compose/task.py" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +#, fuzzy +msgid "Rebuild and restart the services." +msgstr "我们已经可以启动*服务器*了:" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " -"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " -"问题时,管理这些多重讨论会很混乱。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "谷歌文档" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +msgid "If you haven't made any changes, you can skip this step." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 -msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" -" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Flower 增强文件" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +msgid "Run the updated quickstart example:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "整合评估结果" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 -msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." 
-msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +msgid "Step 5: Persisting the SuperLink State" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "自定义整合评估结果" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 +msgid "" +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" -msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" -msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:2 -#, fuzzy -msgid "Authenticate SuperNodes" -msgstr "验证超级节点" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +msgid "Run the command:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -"Flower 内置了对经过身份验证的超级节点的支持,您可以用它来验证连接到超级链接的每个超级节点的身份。Flower 节点身份验证的工作方式与 " -"GitHub SSH 身份验证的工作方式类似:" -#: ../../source/how-to-authenticate-supernodes.rst:7 -#, fuzzy -msgid "SuperLink (server) stores a list of known (client) node public keys" -msgstr "超级链接(服务器)存储已知(客户端)节点公钥列表" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" -msgstr "使用 ECDH,超级节点和超级链路可独立生成共享秘密" +"Docker merges Compose files according to `merging rules " +"`_." 
+msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 -#, fuzzy -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" -msgstr "共享秘密用于计算作为令牌从超级节点发送到超级链接的信息的 HMAC 值" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +msgid "Rerun the ``quickstart-compose`` project:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -#, fuzzy -msgid "SuperLink verifies the token" -msgstr "超级链接验证令牌" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +msgid "Check the content of the ``state`` directory:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" -#: ../../source/how-to-authenticate-supernodes.rst:15 -#, fuzzy -msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." -msgstr "本指南涵盖的预览功能可能会在 Flower 的未来版本中有所改变。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +msgid "Step 6: Run Flower with TLS" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." -msgstr "为提高安全性,只有启用加密连接(SSL/TLS)时才能使用节点验证。" +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -#, fuzzy -msgid "Enable node authentication in :code:`SuperLink`" -msgstr "在 :code:`SuperLink` 中启用节点验证" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 +msgid "These certificates should be used only for development purposes." +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." 
msgstr "" -"要启用节点验证,首先需要配置 SSL/TLS 连接,以确保 SuperLink<>SuperNode 通信的安全。您可以在 " -"`_ " -"找到完整的指南。配置安全连接后,您就可以在长期运行的 Flower " -":code:`SuperLink`中启用客户端身份验证。使用以下终端命令启动一个同时启用了安全连接和节点验证的 Flower " -":code:`SuperNode`:" -#: ../../source/how-to-authenticate-supernodes.rst:38 -#, fuzzy -msgid "Let's break down the authentication flags:" -msgstr "让我们来分析一下身份验证标志:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +msgid "Restart the services with TLS enabled:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 -#, fuzzy -msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +msgid "Step 7: Add another SuperNode" msgstr "" -"第一个标志 :code:`--auth-list-public-keys`(密码:`--auth-list-public-keys`)需要一个 " -"CSV 文件路径,该文件存储了所有已知节点的公钥。您需要在一个 CSV 文件(:code:`.csv`)中存储所有允许参与联盟的已知节点公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:42 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -"存储已知节点公开密钥的有效 CSV 文件应以 OpenSSH " -"格式列出密钥,以逗号分隔,不含任何注释。有关示例,请参阅我们的代码示例,其中包含一个包含两个已知节点公钥的 CSV 文件。" -#: ../../source/how-to-authenticate-supernodes.rst:44 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -"第二和第三个标记 :code:`--auth-superlink-private-key` 和 :code:`--auth-superlink-" -"public-key` 希望指向服务器私钥和公钥的路径。出于开发目的,您可以使用 :code:`ssh-keygen -t ecdsa -b " -"384` 生成一对私钥和公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:47 -#, fuzzy -msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +msgid "In ``compose.yml``, add the following:" msgstr "" -"在 Flower 1.9 中,超级链接不支持动态删除、编辑或添加已知节点公钥。要更改已知节点集,您需要关闭服务器,编辑 CSV " -"文件,然后重新启动服务器。动态更改已知节点集的支持已列入 Flower 1.10(预计发布时间:6 月)的路线图。" -#: ../../source/how-to-authenticate-supernodes.rst:53 -#, fuzzy -msgid "Enable node authentication in :code:`SuperNode`" -msgstr "在 :code:`SuperNode` 中启用节点验证" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +msgid "compose.yml" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -"与长期运行的 Flower 服务器(:code:`SuperLink`)类似,您也可以在长期运行的 Flower " -"客户端(:code:`SuperNode`)中轻松启用节点身份验证。使用以下终端命令启动已验证的 :code:`SuperNode`:" -#: ../../source/how-to-authenticate-supernodes.rst:66 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -":code:`--auth-supernode-private-key`标志需要节点私钥文件的路径,:code:`-auth-supernode-" -"public-key`标志需要节点公钥文件的路径。出于开发目的,可以使用 :code:`ssh-keygen -t ecdsa -b 384` " -"生成一对私钥和公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:70 -#, fuzzy -msgid "Security notice" -msgstr "安全通知" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +msgid "In ``with-tls.yml``, add the following:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 -#, fuzzy -msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +msgid "with-tls.yml" msgstr "" -"系统的安全性依赖于超级链接和每个超级节点的凭证。因此,必须保护和安全存储凭证,以避免公钥基础设施 (PKI) " -"假冒攻击等安全风险。节点验证机制还涉及人机交互,因此请确保使用可信的通信方法,以安全的方式进行所有通信。" -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "总结" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:79 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -"现在,您应该已经学会了如何启动长期运行的 Flower " -"服务器(:code:`SuperLink`)和客户端(:code:`SuperNode`)并启用节点身份验证。您还应该知道私钥的重要性,并将其安全存储,以尽量减少安全风险。" -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" -msgstr "配置客户端" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:4 -msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." -msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +msgid "with-state.yml" +msgstr "" -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "配置值" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 +#, fuzzy +msgid "Restart the services:" +msgstr "启动服务器" -#: ../../source/how-to-configure-clients.rst:9 -msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " -"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"You can merge multiple Compose files into a single file. 
For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " -"将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." msgstr "" -"目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " -"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" - -#: ../../source/how-to-configure-clients.rst:26 -msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." -msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" - -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "通过内置策略进行配置" -#: ../../source/how-to-configure-clients.rst:32 -msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +msgid "Step 10: Clean Up" msgstr "" -"向客户端发送配置值的最简单方法是使用内置策略,如 " -":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" -#: ../../source/how-to-configure-clients.rst:34 -msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. 
Our configuration function could look like this:" -msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +#, fuzzy +msgid "Remove all services and volumes:" +msgstr "从 R 中删除所有项目。" -#: ../../source/how-to-configure-clients.rst:47 -msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" -msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "快速入门 iOS" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" +#: ../../source/docker/use-a-different-version.rst:2 +#, fuzzy +msgid "Use a Different Flower Version" +msgstr "使用不同的 Flower 或 Python 版本" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/docker/use-a-different-version.rst:4 +#, fuzzy msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " -"发送不同的配置值(例如,使用不同的批量大小)。" +"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所有版本都可以在 `Docker Hub " +"`_ 上找到。" -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/use-a-different-version.rst:9 +#, fuzzy msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " -"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " -"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" - -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" - -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "配置个别客户端" +"超级节点 Docker 映像目前仅适用于 1.9.0-nightly 版本。稳定版将在 Flower " +"1.9.0(稳定版)发布时推出(预计发布时间:5 " +"月)。超级节点夜间镜像必须与同一天发布的相应超级链接和服务器应用程序夜间镜像配对。为确保版本同步,建议使用具体标签,例如``1.9.0.dev20240501``,而不是``nightly``。" -#: ../../source/how-to-configure-clients.rst:87 -msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." 
-msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 #, fuzzy msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" -"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " -"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " -"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" +"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " +"从集中式到联邦式 `_ 做少量改动。" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "配置日志记录" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +msgid "Centralized Training" +msgstr "集中式训练" -#: ../../source/how-to-configure-logging.rst:4 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#, fuzzy msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" -msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called :code:`cifar.py`, revised part " +"is shown below:" +msgstr "" +"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " +"的文件,修改部分如下所示:" -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" -msgstr "" -"相关信息包括:日志信息级别(例如 " -":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." 
+msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" -msgstr "将日志保存到文件" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +msgid "You can now run your machine learning workload:" +msgstr "现在,您可以运行您的机器学习工作了:" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#, fuzzy msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" -"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " -":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " -"时(即执行 :code:`fl.simulation.start_simulation` " -"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " -"`_" -" 函数。例如:" +"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " +"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +msgid "Federated Training" +msgstr "联邦培训" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#, fuzzy msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " +"function in :code:`client.py` needed to revise. If not, please read the " +":doc:`Example: PyTorch - From Centralized To Federated `. first." msgstr "" -"通过上述操作,Flower 会将您在终端上看到的日志记录到 " -":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " -":code:`identifier` 作为前缀:" - -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "记录自己的信息" +"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " +":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " +"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." -msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" +"Our example consists of one *server* and two *clients*. In FedBN, " +":code:`server.py` keeps unchanged, we can start the server directly." 
+msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" +"Finally, we will revise our *client* logic by changing " +":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " +"we will exclude batch normalization parameters from model parameter list " +"when sending to or receiving from the server." +msgstr "" +"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " +":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " +"normalization层的参数。" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" -msgstr "登录远程服务" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "现在,您可以打开另外两个终端窗口并运行程序" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." -msgstr "" -"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " -":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " -":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " -"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" +msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" -msgstr "启用 SSL 连接" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 +#: ../../source/tutorial-quickstart-jax.rst:283 +msgid "Next Steps" +msgstr "下一步工作" -#: ../../source/how-to-enable-ssl-connections.rst:4 -#, fuzzy +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." -msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" 
+msgstr "" +"本示例的完整源代码可在 `_ " +"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " +"CIFAR-10 子集,或者增加客户端的数量。" -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "实例: PyTorch - 从集中式到联邦式" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -"有关安全连接的完整代码示例,请参见 `_ 。" +"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " +"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/how-to-enable-ssl-connections.rst:10 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." -msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." +msgstr "" +"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " +"`_。" -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "证书" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +msgid "" +"Let's create a new file called :code:`cifar.py` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as :code:`torch` and :code:`torchvision`) need " +"to be imported. You can see that we do not import any package for " +"federated learning. You can keep all these imports as they are even when " +"we add the federated learning components at a later point." +msgstr "" +"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " +"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " +":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" -#: ../../source/how-to-enable-ssl-connections.rst:18 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in :code:`class Net()`." 
msgstr "" -"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " -":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" +"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " +"中定义。" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." -msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" +"The :code:`load_data()` function loads the CIFAR-10 training and test " +"sets. The :code:`transform` normalized the data after loading." +msgstr "" +":code:`load_data()` 函数加载 CIFAR-10 " +"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" -#: ../../source/how-to-enable-ssl-connections.rst:31 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." -msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" +"We now need to define the training (function :code:`train()`) which loops" +" over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." +msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" -#: ../../source/how-to-enable-ssl-connections.rst:39 -#, fuzzy -msgid "Server (SuperLink)" -msgstr "flower-superlink" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +msgid "" +"The evaluation of the model is defined in the function :code:`test()`. " +"The function loops over all test samples and measures the loss of the " +"model based on the test dataset." +msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" -#: ../../source/how-to-enable-ssl-connections.rst:41 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." +msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" -#: ../../source/how-to-enable-ssl-connections.rst:50 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." 
+msgstr "" +"到目前为止,如果你以前用过 " +"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#: ../../source/how-to-enable-ssl-connections.rst:54 -#, fuzzy -msgid "Client (SuperNode)" -msgstr "客户端状态代码。" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +msgid "" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." +msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" -#: ../../source/how-to-enable-ssl-connections.rst:56 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." +msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" -#: ../../source/how-to-enable-ssl-connections.rst:64 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"The concept is easy to understand. We have to start a *server* and then " +"use the code in :code:`cifar.py` for the *clients* that are connected to " +"the *server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" -"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " -":code:`Path` 来简化以字节字符串形式读取证书的过程。" +"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#: ../../source/how-to-enable-ssl-connections.rst:70 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 +#: ../../source/tutorial-quickstart-jax.rst:129 msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." -msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" +"Our example consists of one *server* and two *clients*. Let's set up " +":code:`server.py` first. The *server* needs to import the Flower package " +":code:`flwr`. Next, we use the :code:`start_server` function to start a " +"server and tell it to perform three rounds of federated learning." 
+msgstr "" +"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " +":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" -#: ../../source/how-to-enable-ssl-connections.rst:75 -msgid "Additional resources" -msgstr "补充资源" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 +#: ../../source/tutorial-quickstart-jax.rst:139 +msgid "We can already start the *server*:" +msgstr "我们已经可以启动*服务器*了:" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" -msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined centralized training in :code:`cifar.py`. " +"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " +"update the parameters on our PyTorch model:" +msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " +"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " +"PyTorch 模型的参数:" -#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" -msgstr "`让我们加密 `_" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. " +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" +msgstr "" +"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" +" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " +":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" -msgstr "`certbot `_" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +msgid ":code:`set_parameters`" +msgstr ":code:`set_parameters`" -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" -msgstr "实施策略" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +#: ../../source/tutorial-quickstart-jax.rst:166 +msgid "" +"set the model parameters on the local model that are received from the " +"server" +msgstr "在本地模型上设置从服务器接收的模型参数" -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 +#: ../../source/tutorial-quickstart-jax.rst:168 msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. 
Flower provides a few built-in strategies which are " -"based on the same API described below." -msgstr "" -"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower" -" 提供了一些内置策略,这些策略基于下文所述的相同 API。" +"loop over the list of model parameters received as NumPy " +":code:`ndarray`'s (think list of neural network layers)" +msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr ":code:`策略 ` 抽象类" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#: ../../source/tutorial-quickstart-jax.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid ":code:`get_parameters`" +msgstr ":code:`get_parameters`" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 +#: ../../source/tutorial-quickstart-jax.rst:170 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"get the model parameters and return them as a list of NumPy " +":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" msgstr "" -"所有策略实现均源自抽象基类 " -":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" +"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " +":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/how-to-implement-strategies.rst:18 -msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" -msgstr "策略抽象定义了一些需要实现的抽象方法:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid ":code:`fit`" +msgstr ":code:`fit`" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:172 +#: ../../source/tutorial-quickstart-jax.rst:176 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" -msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" +"update the parameters of the local model with the parameters received " +"from the server" +msgstr "用从服务器接收到的参数更新本地模型的参数" -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Flower 服务器按以下顺序调用这些方法:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +msgid "train the model on the local training set" +msgstr "在本地训练集上训练模型" -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." 
-msgstr "下文将详细介绍每种方法。" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +msgid "get the updated local model weights and return them to the server" +msgstr "获取更新后的本地模型参数并发送回服务器" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr ":code:`初始化参数` 方法" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid ":code:`evaluate`" +msgstr ":code:`evaluate`" -#: ../../source/how-to-implement-strategies.rst:182 -msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." -msgstr "" -":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " -":code:`Parameters` 对象)提供初始全局模型参数。" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 +#: ../../source/tutorial-quickstart-jax.rst:177 +msgid "evaluate the updated model on the local test set" +msgstr "在本地测试集上评估更新后的模型" -#: ../../source/how-to-implement-strategies.rst:184 -msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" -msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +msgid "return the local loss and accuracy to the server" +msgstr "向服务器返回本地损失值和精确度" -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`test()` previously " +"defined in :code:`cifar.py`. So what we really do here is we tell Flower " +"through our :code:`NumPyClient` subclass which of our already defined " +"functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." msgstr "" -"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " -":code:`initial_parameters` 的参数或 :code:`None`。如果 " -":code:`initialize_parameters` 没有返回任何参数(即 " -":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" +"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " +":code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." 
-" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." -msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" - -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr ":code:`configure_fit`方法" +"All that's left to do it to define a function that loads both model and " +"data, creates a :code:`CifarClient`, and starts this client. You load " +"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " +"with the function :code:`fl.client.start_client()` by pointing it at the " +"same IP address we used in :code:`server.py`:" +msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" -#: ../../source/how-to-implement-strategies.rst:218 -msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" -msgstr "" -":code:`configure_fit` " -"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" -" 说明了这一点:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 +#: ../../source/tutorial-quickstart-jax.rst:274 +msgid "And that's it. You can now open two additional terminal windows and run" +msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" +msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" msgstr "" -"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " -"对象)" +"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " +"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" +" CIFAR-10 子集会如何?增加更多客户端会如何?" 
-#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "差分隐私" + +#: ../../source/explanation-differential-privacy.rst:3 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" -msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`FitIns` 配对" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." +msgstr "医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/explanation-differential-privacy.rst:6 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." -msgstr "" -"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." +msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/explanation-differential-privacy.rst:12 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." 
msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" -" :code:`config` dict)。" +"试想一下,两个数据集除了一条记录(例如 Alice " +"的数据)之外完全相同。差分隐私(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结果(O 和 O' " +"将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信息隐藏在人群中。" -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr ":code:`aggregate_fit` 方法" +#: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy +msgid "DP Intro" +msgstr "DP 介绍" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/explanation-differential-privacy.rst:22 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." -msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." +msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的贡献,同时保持分析的整体准确性。" -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/explanation-differential-privacy.rst:25 +#, fuzzy +msgid "Formal Definition" +msgstr "编译 ProtoBuf 定义" + +#: ../../source/explanation-differential-privacy.rst:26 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " -":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" +"差分隐私(Differential " +"Privacy,DP)针对对手通过随机算法的输出所能推断出的信息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上限[1]。如果任意两个相邻的数据库D" +" :sub:`1`和D :sub:`2`只有一条记录不同,并且对于所有可能的输出S ⊆ " +"Range(A),随机化机制M提供(:math:`epsilon`,:math:`\\delta`)差异隐私:" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/explanation-differential-privacy.rst:32 +#, fuzzy msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." 
+"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " -"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " -"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" - -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr ":code:`configure_evaluate`方法" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/explanation-differential-privacy.rst:38 +#, fuzzy msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -":code:`configure_evaluate` " -"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" -" 说明了这一点:" +":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`\\epsilon` " +"值表示较高的隐私级别,但也可能降低效用。:math:`\\delta`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最大变化。" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" +#: ../../source/explanation-differential-privacy.rst:45 +#, fuzzy +msgid "Differential Privacy in Machine Learning" +msgstr "差分隐私" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/explanation-differential-privacy.rst:46 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." 
msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`EvaluateIns` 配对" +"机器学习中可以利用 DP " +"来保护训练数据的隐私。差分保密机器学习算法的设计方式是防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引入噪声的阶段,有多种方法可将" +" DP " +"应用于机器学习算法。一种方法是在训练数据(特征或标签)中添加噪声,另一种方法是在模型训练过程中向损失函数的梯度注入噪声。此外,这种噪声还可以被纳入模型的输出中。" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/explanation-differential-privacy.rst:53 #, fuzzy -msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." -msgstr "" -"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" +msgid "Differential Privacy in Federated Learning" +msgstr "扩大联邦学习的规模" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/explanation-differential-privacy.rst:54 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." -msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" -" :code:`config` dict)。" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." +msgstr "联合学习是一种数据最小化方法,允许多方在不共享原始数据的情况下合作训练一个模型。然而,联合学习也带来了新的隐私挑战。各方与中央服务器之间的模型更新可能会泄露本地数据信息。这些泄漏信息可能会被攻击利用,如成员推断攻击、属性推断攻击或模型反转攻击。" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr ":code:`aggregate_evaluate` 方法" +#: ../../source/explanation-differential-privacy.rst:58 +#, fuzzy +msgid "" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." +msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/explanation-differential-privacy.rst:60 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." 
msgstr "" -":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " -"中选择并要求评估的客户端返回的结果。" +"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " +"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/explanation-differential-privacy.rst:63 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." -msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " -":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " -":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." +msgstr "**中央差分隐私**: DP 由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/explanation-differential-privacy.rst:65 +#, fuzzy msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." -msgstr "" -":code:`aggregate_evaluate` 返回一个可选的 " -":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " -":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." +msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr ":code:`evaluate`方法" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:68 +#: ../../source/how-to-use-differential-privacy.rst:11 +#, fuzzy +msgid "Central Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/explanation-differential-privacy.rst:69 +#, fuzzy msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." -msgstr "" -":code:`evaluate` 负责在服务器端评估模型参数。除了 " -":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" -" 可以使策略同时执行服务器端和客户端(联邦)评估。" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." +msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要注意的是,这需要对服务器的信任。" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/explanation-differential-privacy.rst:76 +#, fuzzy msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." 
+"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " -"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" +"虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常用方法是限制客户机模型更新的" +" `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" -msgstr "安装Flower" +#: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy +msgid "clipping" +msgstr "剪贴" -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" -msgstr "Python 版本" +#: ../../source/explanation-differential-privacy.rst:89 +#, fuzzy +msgid "" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." +msgstr "" +"然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` " +",其中 σ = ( 噪声规模 * S ) / (采样客户数)`。" -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "安装稳定版" +#: ../../source/explanation-differential-privacy.rst:94 +#, fuzzy +msgid "Clipping" +msgstr "剪贴" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 +#: ../../source/explanation-differential-privacy.rst:96 #, fuzzy -msgid "Using pip" -msgstr "使用 pip" +msgid "" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." +msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/explanation-differential-privacy.rst:98 +#, fuzzy msgid "" -"Stable releases are available on `PyPI " -"`_::" -msgstr "稳定版本可在 `PyPI `_::" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." +msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值的更新都会被剪切回阈值。" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/explanation-differential-privacy.rst:100 +#, fuzzy msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." 
+msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮中,会根据更新规范分布的量化值调整削波值。" -#: ../../source/how-to-install-flower.rst:27 +#: ../../source/explanation-differential-privacy.rst:102 #, fuzzy -msgid "Using conda (or mamba)" -msgstr "使用 conda(或 mamba)" +msgid "" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." +msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型复杂性等。" -#: ../../source/how-to-install-flower.rst:29 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:105 +#: ../../source/how-to-use-differential-privacy.rst:96 #, fuzzy -msgid "Flower can also be installed from the ``conda-forge`` channel." -msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" +msgid "Local Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/explanation-differential-privacy.rst:107 #, fuzzy msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" -msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." +msgstr "" +"在这种方法中,每个客户端都负责执行 DP。本地 DP 避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " +"会降低准确性,但却能更好地保护隐私。" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/explanation-differential-privacy.rst:116 +#, fuzzy +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式:" + +#: ../../source/explanation-differential-privacy.rst:118 #, fuzzy msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" -msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" +msgstr "" +"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:math:`\\epsilon`, " +":math:`\\delta`)-DP,考虑到本地模型的灵敏度为 ∆,应用了高斯噪声,噪声尺度为 σ,其中:" -#: ../../source/how-to-install-flower.rst:40 +#: ../../source/explanation-differential-privacy.rst:120 #, fuzzy -msgid "or with ``mamba``::" -msgstr "或用 ``mamba`` ::" +msgid "" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" +msgstr "" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "验证安装" +#: ../../source/explanation-differential-privacy.rst:125 +#, fuzzy +msgid "" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." +msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说,在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/explanation-differential-privacy.rst:128 #, fuzzy msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. 
If everything worked, it should print the version of Flower to" -" the command line::" -msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" +"Please note that these two approaches are providing privacy at different " +"levels." +msgstr "请注意,这两种方法提供了不同层次的隐私。" -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "高级安装选项" +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy +msgid "**References:**" +msgstr "参考资料" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/explanation-differential-privacy.rst:133 #, fuzzy -msgid "Install via Docker" -msgstr "安装Flower" +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +msgstr "[1] Dwork 等:《差分隐私的算法基础》。" -#: ../../source/how-to-install-flower.rst:60 +#: ../../source/explanation-differential-privacy.rst:135 #, fuzzy -msgid ":doc:`How to run Flower using Docker `" +msgid "" +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -"`TensorFlow快速入门 (教程) `_" - -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "安装预发布版本" +"McMahan, H. Brendan等. \"Learning differentially private recurrent " +"language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/explanation-differential-privacy.rst:137 +#, fuzzy msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" -msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" -#: ../../source/how-to-install-flower.rst:69 -msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" +#: ../../source/explanation-differential-privacy.rst:139 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +msgstr "" +"Andrew, Galen等. \"Differentially private learning with adaptive " +"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " +"17455-17466." -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "安装隔夜版本" +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "联邦学习评估" -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" -msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." 
+msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" + +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "集中评估" + +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "内置策略" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" +msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "监控模拟" +#: ../../source/explanation-federated-evaluation.rst:58 +msgid "Custom Strategies" +msgstr "定制策略" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-federated-evaluation.rst:60 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"The :code:`Strategy` abstraction provides a method called " +":code:`evaluate` that can directly be used to evaluate the current global" +" model parameters. The current server implementation calls " +":code:`evaluate` after parameter aggregation and before federated " +"evaluation (see next paragraph)." msgstr "" -"Flower 允许您在运行模拟时监控系统资源。此外,Flower " -"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" +":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " +"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" -#: ../../source/how-to-monitor-simulation.rst:6 -msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." -msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" +#: ../../source/explanation-federated-evaluation.rst:65 +msgid "Federated Evaluation" +msgstr "联邦评估" -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "下载" +#: ../../source/explanation-federated-evaluation.rst:68 +msgid "Implementing Federated Evaluation" +msgstr "实现联邦评估" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/explanation-federated-evaluation.rst:70 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." -msgstr "" -"`Prometheus `_ 用于收集数据,而 `Grafana " -"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " -"`_ 紧密集成。" +"Client-side evaluation happens in the :code:`Client.evaluate` method and " +"can be configured from the server side." 
+msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/explanation-federated-evaluation.rst:101 +msgid "Configuring Federated Evaluation" +msgstr "配置联邦评估" + +#: ../../source/explanation-federated-evaluation.rst:103 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." -msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "如果你使用的是 M1 Mac,应该是这样:" +#: ../../source/explanation-federated-evaluation.rst:105 +msgid "" +":code:`fraction_evaluate`: a :code:`float` defining the fraction of " +"clients that will be selected for evaluation. If " +":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " +"are connected to the server, then :code:`10` will be randomly selected " +"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " +"federated evaluation will be disabled." +msgstr "" +":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 " +":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 " +":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " +":code:`0.0`,联邦评估将被禁用。" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "在上一代英特尔 Mac 设备上,应该是这样:" +#: ../../source/explanation-federated-evaluation.rst:106 +msgid "" +":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " +"clients to be selected for evaluation. If :code:`fraction_evaluate` is " +"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " +":code:`100` clients are connected to the server, then :code:`20` clients " +"will be selected for evaluation." +msgstr "" +":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 " +":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " +"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-federated-evaluation.rst:107 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" -msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" +":code:`min_available_clients`: an :code:`int` that defines the minimum " +"number of clients which need to be connected to the server before a round" +" of federated evaluation can start. If fewer than " +":code:`min_available_clients` are connected to the server, the server " +"will wait until more clients are connected before it continues to sample " +"clients for evaluation." +msgstr "" +":code:`min_available_clients`: " +":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " +":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-federated-evaluation.rst:108 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" -msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" +":code:`on_evaluate_config_fn`: a function that returns a configuration " +"dictionary which will be sent to the selected clients. 
The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." +msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-federated-evaluation.rst:135 +msgid "Evaluating Local Model Updates During Training" +msgstr "评估训练期间的本地模型更新" + +#: ../../source/explanation-federated-evaluation.rst:137 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" -msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" +"Model parameters can also be evaluated during training. " +":code:`Client.fit` can return arbitrary evaluation results as a " +"dictionary:" +msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-federated-evaluation.rst:177 +msgid "Full Code Example" +msgstr "完整代码示例" + +#: ../../source/explanation-federated-evaluation.rst:179 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." -msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +msgstr "" +"有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " +"Example*(同样的方法也可应用于任何其他框架中): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "跟踪指标" +#: ../../source/explanation-flower-architecture.rst:3 +msgid "Flower Architecture" +msgstr "Flower的架构" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-flower-architecture.rst:5 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." -msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" +"This page explains the architecture of deployed Flower federated learning" +" system." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-flower-architecture.rst:8 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." -msgstr "开始模拟时,请在 Python 代码中加入以下参数。" - -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." -msgstr "现在,您可以开始工作了。" +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." 
+msgstr "" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-flower-architecture.rst:12 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" -msgstr "模拟启动后不久,您就会在终端中看到以下日志:" +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "您可以在 ``_ 查看所有内容。" +#: ../../source/explanation-flower-architecture.rst:16 +msgid "This is sometimes called a hub-and-spoke topology:" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/explanation-flower-architecture.rst:24 +#, fuzzy +msgid "Hub-and-spoke topology in federated learning" +msgstr "什么是联邦学习?" + +#: ../../source/explanation-flower-architecture.rst:24 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." -msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/explanation-flower-architecture.rst:26 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " -"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " -"Grafana。" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-flower-architecture.rst:31 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." -msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "资源分配" +#: ../../source/explanation-flower-architecture.rst:36 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/explanation-flower-architecture.rst:38 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." 
+msgstr "" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-flower-architecture.rst:41 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -"最初,模拟(由 Ray " -"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "在 Google Colab 中,您看到的结果可能与此类似:" +#: ../../source/explanation-flower-architecture.rst:47 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-flower-architecture.rst:49 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" -msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" - -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "我们还可以为单个客户指定资源。" +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/explanation-flower-architecture.rst:53 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." -msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-flower-architecture.rst:59 msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." 
msgstr "" -"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " -"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " -"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "常见问题" +#: ../../source/explanation-flower-architecture.rst:71 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Flower的架构" -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "问:我没有看到任何指标记录。" +#: ../../source/explanation-flower-architecture.rst:71 +#, fuzzy +msgid "The basic Flower architecture for federated learning." +msgstr "本轮联邦学习。" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." -msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-flower-architecture.rst:79 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." -msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/explanation-flower-architecture.rst:82 msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" -msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:226 -msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." -msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" - -#: ../../source/how-to-monitor-simulation.rst:228 -msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." -msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" - -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "资源" - -#: ../../source/how-to-monitor-simulation.rst:234 -#, fuzzy -msgid "" -"Ray Dashboard: ``_" -msgstr "Ray 仪表盘: ``_" - -#: ../../source/how-to-monitor-simulation.rst:236 -#, fuzzy -msgid "Ray Metrics: ``_" -msgstr "" -"Ray 指标: ``_" - -#: ../../source/how-to-run-flower-using-docker.rst:2 -#, fuzzy -msgid "Run Flower using Docker" -msgstr "使用 Docker 运行 Flower" - -#: ../../source/how-to-run-flower-using-docker.rst:4 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:87 msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__. Supported architectures include " -"``amd64`` and ``arm64v8``." 
+"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -"开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 `Docker Hub " -"`_ 上找到这些镜像。" -#: ../../source/how-to-run-flower-using-docker.rst:8 +#: ../../source/explanation-flower-architecture.rst:97 #, fuzzy -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "开始之前,请确保 Docker 守护进程正在运行:" +msgid "Multi-tenancy federated learning architecture" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:15 +#: ../../source/explanation-flower-architecture.rst:97 #, fuzzy -msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." -msgstr "" -"如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在 " -"`_ 找到安装说明。" +msgid "Multi-tenancy federated learning architecture with Flower" +msgstr "步骤 2:使用 Flower 联邦学习" -#: ../../source/how-to-run-flower-using-docker.rst:21 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:99 msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -"在 Linux 上,Docker 命令需要 ``sudo`` 权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 " -"`安装后步骤 `_进行操作。" -#: ../../source/how-to-run-flower-using-docker.rst:27 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:104 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -"为确保最佳性能和兼容性,SuperLink、SuperNode 和 ServerApp " -"映像在一起运行时必须具有相同的版本。这可确保无缝集成,并避免因使用不同版本而可能产生的潜在冲突或问题。" - -#: ../../source/how-to-run-flower-using-docker.rst:32 -#, fuzzy -msgid "Flower SuperLink" -msgstr "flower-superlink" - -#: ../../source/how-to-run-flower-using-docker.rst:35 -#, fuzzy -msgid "Quickstart" -msgstr "快速入门 JAX" -#: ../../source/how-to-run-flower-using-docker.rst:37 +#: ../../source/explanation-flower-architecture.rst:113 #, fuzzy -msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "如果您想试用 Flower,可以使用以下命令:" +msgid "Multi-tenancy federated learning architecture - Run 1" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:43 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:113 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." 
msgstr "" -"该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的 Docker 镜像。标签包含使用 " -"Flower、Python 和 Ubuntu 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和 Ubuntu " -"22.04。rm \"标记告诉 Docker 在退出后移除容器。" -#: ../../source/how-to-run-flower-using-docker.rst:49 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:116 msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -"默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` " -"时,状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件中。" -#: ../../source/how-to-run-flower-using-docker.rst:53 +#: ../../source/explanation-flower-architecture.rst:125 #, fuzzy -msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." -msgstr "" -"``-p :`` 标记会告诉 Docker 将主机的端口 ``9091``/``9092`` 映射到容器的端口 " -"``9091``/`9092``,这样你就可以在 ``http://localhost:9091`` 上访问 Driver API,在 " -"``http://localhost:9092`` 上访问 Fleet API。最后,标签后面的任何标志都会传递给 Flower " -"服务器。在这里,我们传递的标志是 ``--insecure`` 。" +msgid "Multi-tenancy federated learning architecture - Run 2" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:60 -#: ../../source/how-to-run-flower-using-docker.rst:259 -#: ../../source/how-to-run-flower-using-docker.rst:376 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:125 msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." msgstr "" -"不安全 \"标志启用不安全通信(使用 HTTP,而非 HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " -"`_。" - -#: ../../source/how-to-run-flower-using-docker.rst:65 -#, fuzzy -msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" -msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" - -#: ../../source/how-to-run-flower-using-docker.rst:72 -#, fuzzy -msgid "Mounting a volume to store the state on the host system" -msgstr "在主机系统上挂载卷以存储状态" -#: ../../source/how-to-run-flower-using-docker.rst:74 +#: ../../source/explanation-flower-architecture.rst:129 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a directory where you want to save the file" -" on your host system and a name for the database file. By default, the " -"SuperLink container runs with a non-root user called ``app`` with the " -"user ID ``49999``. It is recommended to create new directory and change " -"the user ID of the directory to ``49999`` to ensure the mounted directory" -" has the proper permissions. If you later want to delete the directory, " -"you can change the user ID back to the current user ID by running ``sudo " -"chown -R $USER:$(id -gn) state``." 
+"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:82 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:132 msgid "" -"In the example below, we create a new directory, change the user ID and " -"tell Docker via the flag ``--volume`` to mount the local ``state`` " -"directory into the ``/app/state`` directory of the container. " -"Furthermore, we use the flag ``--database`` to specify the name of the " -"database file." +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -"如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉" -" Docker 将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 ``--database``" -" 来指定数据库文件的名称。" -#: ../../source/how-to-run-flower-using-docker.rst:95 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -"服务器一启动,就会在主机系统的用户主目录下创建文件 " -"``state.db``。如果该文件已经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 ``state.db`` 文件即可。" - -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 -#, fuzzy -msgid "Enabling SSL for secure connections" -msgstr "启用 SSL 连接" - -#: ../../source/how-to-run-flower-using-docker.rst:102 -#, fuzzy -msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#: ../../source/how-to-run-flower-using-docker.rst:106 -#, fuzzy -msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +#: ../../source/explanation-flower-architecture.rst:151 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -"出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ 页面中有一个部分将指导你完成这一过程。" -#: ../../source/how-to-run-flower-using-docker.rst:110 -#, fuzzy -msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container. This allows the " -"SuperLink to access the files within the container. The ``ro`` stands for" -" ``read-only``. Docker volumes default to ``read-write``; that option " -"tells Docker to make the volume ``read-only`` instead. 
Finally, we pass " -"the names of the certificates and key file to the SuperLink with the " -"``--ssl-ca-certfile``, ``--ssl-certfile`` and ``--ssl-keyfile`` flag." +#: ../../source/explanation-flower-architecture.rst:151 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/how-to-run-flower-using-docker.rst:128 +#: ../../source/explanation-flower-architecture.rst:156 msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, " -"the mounted files and directories must have the proper permissions for " -"the user ID ``49999``. For example, to change the user ID of all files in" -" the ``certificates/`` directory, you can run ``sudo chown -R 49999:49999" -" certificates/*``." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:134 -#, fuzzy -msgid "Flower SuperNode" -msgstr "Flower 服务器" - -#: ../../source/how-to-run-flower-using-docker.rst:136 +#: ../../source/explanation-flower-architecture.rst:161 #, fuzzy msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." -msgstr "超级节点 Docker 镜像预装了 Flower 版本,可作为构建自己的超级节点镜像的基础。" +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." +msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" -#: ../../source/how-to-run-flower-using-docker.rst:141 -#, fuzzy -msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." -msgstr "" -"超级节点 Docker 映像目前仅适用于 1.9.0-nightly 版本。稳定版将在 Flower " -"1.9.0(稳定版)发布时推出(预计发布时间:5 " -"月)。超级节点夜间镜像必须与同一天发布的相应超级链接和服务器应用程序夜间镜像配对。为确保版本同步,建议使用具体标签,例如``1.9.0.dev20240501``,而不是``nightly``。" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "FED 模板" -#: ../../source/how-to-run-flower-using-docker.rst:147 -#, fuzzy -msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." -msgstr "我们将使用 \"quickstart-pytorch\"(快速启动-pytorch)示例来说明如何对 ClientApp 进行 docker 化。" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "目录" -#: ../../source/how-to-run-flower-using-docker.rst:155 -#, fuzzy -msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." 
-msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[目录](#table-of-contents)" -#: ../../source/how-to-run-flower-using-docker.rst:159 -#, fuzzy -msgid "Clone the Flower repository." -msgstr "**叉花仓库**" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[总结](#summary)" -#: ../../source/how-to-run-flower-using-docker.rst:173 -#, fuzzy -msgid "Creating a SuperNode Dockerfile" -msgstr "创建超级节点 Dockerfile" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[动机](#motivation)" -#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -#, fuzzy -msgid "Let's assume the following project layout:" -msgstr "假设项目布局如下" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[目标](#goals)" -#: ../../source/how-to-run-flower-using-docker.rst:184 -#, fuzzy -msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." -msgstr "" -"首先,我们需要在 ``ClientApp`` 代码所在的目录中创建一个 ``requirements.txt`` 文件。在该文件中,我们列出了 " -"ClientApp 需要的所有依赖项。" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[非目标](#non-goals)" -#: ../../source/how-to-run-flower-using-docker.rst:196 -#, fuzzy -msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." -msgstr "" -"请注意,`flwr `__ " -"已经安装在`flwr/supernode``基础镜像中,因此只需在`requirements.txt``中包含其他依赖包,如`torch``、`tensorflow`等。" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[计划](#proposal)" -#: ../../source/how-to-run-flower-using-docker.rst:200 -#, fuzzy -msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." -msgstr "" -"接下来,我们创建一个 Dockerfile。如果使用 ``quickstart-pytorch`` 示例,请在 ``examples" -"/quickstart-pytorch`` 中创建一个名为 ``Dockerfile.supernode`` 的新文件。" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[缺点](#drawbacks)" -#: ../../source/how-to-run-flower-using-docker.rst:203 -#, fuzzy -msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." 
-msgstr "Dockerfile.supernode \"包含组装超级节点映像的指令。" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[备选方案](#alternatives-considered)" -#: ../../source/how-to-run-flower-using-docker.rst:217 -#, fuzzy -msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." -msgstr "" -"在前两行中,我们指示 Docker 使用标记为 ``nightly`` 的 SuperNode 镜像作为基础镜像,并将工作目录设置为 " -"``/app``。下面的指令将在 ``/app`` 目录中执行。接下来,我们通过将 ``requirements.txt`` " -"文件复制到映像中并运行 ``pip install`` 来安装 ClientApp 依赖项。最后两行,我们将 ``client.py`` " -"模块复制到映像中,并将入口点设置为 ``flower-client-app``,参数为 ``client:app``。参数是将在 " -"ClientApp 内运行的 ClientApp 的对象引用(``<模块>:<属性>``)。" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[附录](#appendix)" -#: ../../source/how-to-run-flower-using-docker.rst:226 -#, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "启动服务器" +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "总结" -#: ../../source/how-to-run-flower-using-docker.rst:228 -#, fuzzy -msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." -msgstr "接下来,我们在 Dockerfile 和 ClientApp 代码所在的目录下运行以下命令,构建 SuperNode Docker 映像。" +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "\\[TODO - 句子 1: 问题概括\\]" -#: ../../source/how-to-run-flower-using-docker.rst:235 -#, fuzzy -msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "我们将图像命名为 ``flwr_supernode``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "\\[TODO - 句子 2: 解决方案概括\\]" -#: ../../source/how-to-run-flower-using-docker.rst:240 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "启动服务器" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "动机" -#: ../../source/how-to-run-flower-using-docker.rst:242 -#, fuzzy -msgid "Now that we have built the SuperNode image, we can finally run it." 
-msgstr "现在,我们已经构建了超级节点镜像,终于可以运行它了。" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" +msgstr "\\[TODO\\]" -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -#, fuzzy -msgid "Let's break down each part of this command:" -msgstr "让我们来分析一下这条命令的各个部分:" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "目标" -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 -#, fuzzy -msgid "``docker run``: This is the command to run a new Docker container." -msgstr "`docker run``: 这是运行新 Docker 容器的命令。" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "非目标" -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 -#, fuzzy -msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." -msgstr "`-rm``: 该选项指定容器停止时应自动移除。" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "提案" -#: ../../source/how-to-run-flower-using-docker.rst:254 -#, fuzzy -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "flwr_supernode:0.0.1``: 要使用的 Docker 映像的名称和标记。" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "缺点" -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -#, fuzzy -msgid "``--insecure``: This option enables insecure communication." -msgstr "不安全\": 该选项启用不安全通信。" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "备选方案" -#: ../../source/how-to-run-flower-using-docker.rst -#, fuzzy -msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of " -"the SuperLinks Fleet" -msgstr "``--server 192.168.1.100:9092``: 该选项指定超级链接舰队的地址" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" +msgstr "\\[备选 1\\]" -#: ../../source/how-to-run-flower-using-docker.rst -#, fuzzy -msgid "API to connect to. Remember to update it with your SuperLink IP." -msgstr "要连接的 API。记住用您的超级链接 IP 更新它。" +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" +msgstr "\\[备选 2\\]" -#: ../../source/how-to-run-flower-using-docker.rst:269 -#, fuzzy -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." 
-msgstr "" -"要测试在本地运行 Flower,可以创建一个 \"桥接网络 `__\",使用\"--网络 " -"\"参数并传递 Docker 网络的名称,以运行超级节点。" - -#: ../../source/how-to-run-flower-using-docker.rst:273 -#, fuzzy -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" -msgstr "标记后的任何参数都将传递给 Flower 超级节点二进制文件。要查看超级节点支持的所有可用标记,请运行" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Flower 改善文档" -#: ../../source/how-to-run-flower-using-docker.rst:283 -#, fuzzy -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." -msgstr "要启用 SSL,我们需要将 PEM 编码的根证书挂载到 SuperNode 容器中。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[增强文档模版](#enhancement-doc-template)" -#: ../../source/how-to-run-flower-using-docker.rst:285 -#, fuzzy -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--root-certificates`` flag when starting " -"the container." -msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[描述数据](#metadata)" -#: ../../source/how-to-run-flower-using-docker.rst:297 -#, fuzzy -msgid "Flower ServerApp" -msgstr "Flower 服务器。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[工作流程](#workflow)" -#: ../../source/how-to-run-flower-using-docker.rst:299 -#, fuzzy -msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." -msgstr "构建和运行 ServerApp 映像的程序与 SuperNode 映像几乎完全相同。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub 问题](#github-issues)" -#: ../../source/how-to-run-flower-using-docker.rst:301 -#, fuzzy -msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." -msgstr "" -"与 SuperNode 映像类似,ServerApp Docker 映像也预装了 Flower 版本,可作为构建自己的 ServerApp " -"映像的基础。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[谷歌文档](#google-docs)" -#: ../../source/how-to-run-flower-using-docker.rst:304 -#, fuzzy -msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." 
-msgstr "" -"我们将使用与 \"Flower SuperNode \"部分相同的 \"quickstart-pytorch " -"\"示例。如果您还没有这样做,请在继续之前遵循 \"SuperNode 先决条件\"。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" +msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" -#: ../../source/how-to-run-flower-using-docker.rst:309 -#, fuzzy -msgid "Creating a ServerApp Dockerfile" -msgstr "创建 ServerApp Dockerfile" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" +msgstr "为提出更大规模的改动提供一个共同的结构" -#: ../../source/how-to-run-flower-using-docker.rst:320 -#, fuzzy -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." -msgstr "" -"首先,我们需要在 ``ServerApp`` 代码所在的目录中创建一个 Dockerfile。如果使用 ``quickstart-" -"pytorch`` 示例,请在 ``examples/quickstart-pytorch`` 中创建一个名为 " -"``Dockerfile.serverapp`` 的新文件。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "确保改动的动机明确" -#: ../../source/how-to-run-flower-using-docker.rst:324 -#, fuzzy -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." -msgstr "Dockerfile.serverapp \"包含组装 ServerApp 镜像的说明。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" +msgstr "将项目信息保存在版本控制系统中" -#: ../../source/how-to-run-flower-using-docker.rst:335 -#, fuzzy -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." -msgstr "" -"在前两行中,我们指示 Docker 使用标记为 ``1.8.0`` 的 ServerApp 镜像作为基础镜像,并将工作目录设置为 " -"``/app``。下面的指令将在 ``/app`` 目录中执行。在最后两行中,我们将 ``server.py`` " -"模块复制到映像中,并将入口点设置为 ``flower-server-app``,参数为 ``server:app``。参数是将在 " -"ServerApp 容器内运行的 ServerApp 的对象引用(``<模块>:<属性>``)。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" +msgstr "记录面向用户的具有影响力的改动的动机" -#: ../../source/how-to-run-flower-using-docker.rst:343 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "启动服务器" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "保留 GitHub 问题,用于跟踪进行中的工作" -#: ../../source/how-to-run-flower-using-docker.rst:345 -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." 
-msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" +msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" -#: ../../source/how-to-run-flower-using-docker.rst:352 -#, fuzzy -msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "因此,\"增强文件\"将以下方面结合起来" -#: ../../source/how-to-run-flower-using-docker.rst:357 -#, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "启动服务器" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "一个功能和效力跟踪文档" -#: ../../source/how-to-run-flower-using-docker.rst:359 -#, fuzzy -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "现在我们已经构建了 ServerApp 镜像,终于可以运行它了。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "一个产品需要文档" -#: ../../source/how-to-run-flower-using-docker.rst:371 -#, fuzzy -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." -msgstr "flwr_serverapp:0.0.1``: 要使用的 Docker 映像的名称和标记。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "一个设计文档" -#: ../../source/how-to-run-flower-using-docker.rst -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of " -"the SuperLinks Driver" -msgstr "``--server 192.168.1.100:9091``: 此选项指定超级链接驱动程序的地址" +"into one file, which is created incrementally in collaboration with the " +"community." +msgstr "该文件是与社区合作逐步创建的。" -#: ../../source/how-to-run-flower-using-docker.rst:385 -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" -"要测试在本地运行 Flower,可以创建一个 ``bridge network `___,使用 " -"``--network`` 参数并传递 Docker 网络的名称,以运行 ServerApps。" +"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " +"request)的抽象概念,以了解和沟通项目即将发生的变更。" -#: ../../source/how-to-run-flower-using-docker.rst:389 -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" -msgstr "标记后的任何参数都将传递给 Flower ServerApp 二进制文件。要查看 ServerApp 支持的所有可用标记,请运行" +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." 
+msgstr "" +"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " +"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" -#: ../../source/how-to-run-flower-using-docker.rst:399 -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." +msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" -#: ../../source/how-to-run-flower-using-docker.rst:401 -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--root-certificates`` flags when starting" -" the container." -msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" - -#: ../../source/how-to-run-flower-using-docker.rst:412 -#, fuzzy -msgid "Advanced Docker options" -msgstr "高级安装选项" - -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" -msgstr "" +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." +msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" -#: ../../source/how-to-run-flower-using-docker.rst:417 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"Flower Docker images, by default, run with a non-root user " -"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is not" -" recommended unless it is necessary for specific tasks during the build " -"process. Always make sure to run the container as a non-root user in " -"production to maintain security best practices." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" +"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " +"\"的工作或使用方式。" -#: ../../source/how-to-run-flower-using-docker.rst:424 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" -msgstr "" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." +msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +msgid "" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." +msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "增强文档模板" -#: ../../source/how-to-run-flower-using-docker.rst:434 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the " -"``USER root`` directive within your Dockerfile." -msgstr "" +"Each enhancement doc is provided as a Markdown file having the following " +"structure" +msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" -#: ../../source/how-to-run-flower-using-docker.rst:454 -#, fuzzy -msgid "Using a different Flower version" -msgstr "使用不同的 Flower 或 Python 版本" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" -#: ../../source/how-to-run-flower-using-docker.rst:456 -#, fuzzy -msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." -msgstr "" -"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所有版本都可以在 `Docker Hub " -"`_ 上找到。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "标题(与描述数据中的标题相同)" -#: ../../source/how-to-run-flower-using-docker.rst:460 -#, fuzzy -msgid "Pinning a Docker image to a specific version" -msgstr "将 Docker 映像固定到特定版本" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "目录(如有需要)" -#: ../../source/how-to-run-flower-using-docker.rst:462 -#, fuzzy -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." 
-msgstr "" -"我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 Flower " -"的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不是标签。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "注意事项/限制/警告(可选)" -#: ../../source/how-to-run-flower-using-docker.rst:467 -#, fuzzy -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" -msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "设计细节(可选)" -#: ../../source/how-to-run-flower-using-docker.rst:474 -#, fuzzy -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "毕业标准" -#: ../../source/how-to-run-flower-using-docker.rst:483 -#, fuzzy -msgid "Setting environment variables" -msgstr "设置编码环境" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "升级/降级策略(如适用)" -#: ../../source/how-to-run-flower-using-docker.rst:485 -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." +msgstr "作为参考,本文件采用上述结构。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "描述数据" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." -msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." +msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "运行模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**标题** (必填)用简明语言写出提案的标题。" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." -msgstr "" -"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" -" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " -"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" - -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. 
These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" -msgstr "" -":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" -" `_启动的客户端),因为它们可以通过创建一个继承自 " -"`flwr.client.NumPyClient `_ " -"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." +msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." -msgstr "" -"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " -"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." +msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." -msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." +msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." -msgstr "" -"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " -")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." +msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." -msgstr "" -":code:`VirtualClientEngine`使用`Ray " -"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" -" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." 
+msgstr "**另见** (可选)与本提案相关的其他提案清单。" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" -msgstr "启动 Flower 模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**取代**(可选) 这份提案所取代的提案列表。" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" -msgstr "" -"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " -"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +msgstr "**被取代者** (可选) 此提案取代的提案列表。" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" -msgstr "虚拟客户端引擎资源" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "工作流程" -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." -msgstr "" -"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " -"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " -":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" -" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " -"GPU,请不要设置 :code:`ray_init_args`。" - -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "分配客户端资源" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." +msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." 
msgstr "" -"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " -"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" +"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " +"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " +"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." msgstr "" -"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " -"`client_resources` 设置为 `start_simulation `_ 。Ray " -"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" +"一旦增强功能通过审核和批准,其状态就会变为 " +"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." -msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 +msgid "" +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" +msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" -#: ../../source/how-to-run-simulations.rst:68 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." -msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." +msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" -msgstr "让我们来看几个例子:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`可实施`: 增强功能已审核通过。" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." 
-msgstr "" -"虽然 :code:`client_resources` 可用来控制 FL " -"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " -"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " -"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " -"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." +msgstr "`已实施`: 增强功能已实施,不再主动更改。" -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." -msgstr "" -"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " -"`_。" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." +msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" -msgstr "模拟示例" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`撤回`: 作者已撤回增强功能。" -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "`已替换`: 增强功能已被新的增强功能取代。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" -msgstr "" -"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " -"`_ 中提供。您也可以在 Google Colab 上运行它们:" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." +msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." -msgstr "" -"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." +msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "GitHub 问题" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." 
msgstr "" -"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" +"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " +"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " +"问题时,管理这些多重讨论会很混乱。" -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" -msgstr "多节点 Flower 模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "谷歌文档" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" -msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." +msgstr "" +"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" +" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." -msgstr "所有节点都有相同的 Python 环境。" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Flower 增强文件" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." -msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "整合评估结果" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" -msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." +msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" -#: ../../source/how-to-run-simulations.rst:111 -msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." -msgstr "" -"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " -":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "自定义整合评估结果" -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." -msgstr "" -"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" -"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" +"The same :code:`Strategy`-customization approach can be used to aggregate" +" custom evaluation results coming from individual clients. 
Clients can " +"return custom metrics to the server by returning a dictionary:" +msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/how-to-aggregate-evaluation-results.rst:36 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" -msgstr "" -"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " -"--address='192.168.1.132:6379'`" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." -msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" +#: ../../source/how-to-authenticate-supernodes.rst:2 +#, fuzzy +msgid "Authenticate SuperNodes" +msgstr "验证超级节点" -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/how-to-authenticate-supernodes.rst:4 +#, fuzzy msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." -msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" +msgstr "" +"Flower 内置了对经过身份验证的超级节点的支持,您可以用它来验证连接到超级链接的每个超级节点的身份。Flower 节点身份验证的工作方式与 " +"GitHub SSH 身份验证的工作方式类似:" -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "了解多节点模拟" +#: ../../source/how-to-authenticate-supernodes.rst:7 +#, fuzzy +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "超级链接(服务器)存储已知(客户端)节点公钥列表" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/how-to-authenticate-supernodes.rst:8 +#, fuzzy msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" -msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "使用 ECDH,超级节点和超级链路可独立生成共享秘密" -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/how-to-authenticate-supernodes.rst:9 +#, fuzzy msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." -msgstr "" -"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " -"可用的总资源。" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" +msgstr "共享秘密用于计算作为令牌从超级节点发送到超级链接的信息的 HMAC 值" -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/how-to-authenticate-supernodes.rst:10 +#, fuzzy +msgid "SuperLink verifies the token" +msgstr "超级链接验证令牌" + +#: ../../source/how-to-authenticate-supernodes.rst:12 +#, fuzzy msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. 
In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" -"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" -" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" -" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" -"gpus=`" +"请参阅`完整代码示例 " +"`_了解更多信息。" -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" -msgstr "模拟的注意事项" +#: ../../source/how-to-authenticate-supernodes.rst:15 +#, fuzzy +msgid "" +"This guide covers a preview feature that might change in future versions " +"of Flower." +msgstr "本指南涵盖的预览功能可能会在 Flower 的未来版本中有所改变。" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/how-to-authenticate-supernodes.rst:18 +#, fuzzy msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." -msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." +msgstr "为提高安全性,只有启用加密连接(SSL/TLS)时才能使用节点验证。" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/how-to-authenticate-supernodes.rst:21 +#, fuzzy +msgid "Enable node authentication in :code:`SuperLink`" +msgstr "在 :code:`SuperLink` 中启用节点验证" + +#: ../../source/how-to-authenticate-supernodes.rst:23 +#, fuzzy msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower " +":code:`SuperLink`. Use the following terminal command to start a Flower " +":code:`SuperNode` that has both secure connections and node " +"authentication enabled:" msgstr "" -"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" -" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " -"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" +"要启用节点验证,首先需要配置 SSL/TLS 连接,以确保 SuperLink<>SuperNode 通信的安全。您可以在 " +"`_ " +"找到完整的指南。配置安全连接后,您就可以在长期运行的 Flower " +":code:`SuperLink`中启用客户端身份验证。使用以下终端命令启动一个同时启用了安全连接和节点验证的 Flower " +":code:`SuperNode`:" -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" -msgstr "GPU 资源" +#: ../../source/how-to-authenticate-supernodes.rst:38 +#, fuzzy +msgid "Let's break down the authentication flags:" +msgstr "让我们来分析一下身份验证标志:" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/how-to-authenticate-supernodes.rst:40 +#, fuzzy msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. 
This being said, Ray (used " -"internally by the VCE) is by default:" +"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " +"file storing all known node public keys. You need to store all known node" +" public keys that are allowed to participate in a federation in one CSV " +"file (:code:`.csv`)." msgstr "" -"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " -"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" +"第一个标志 :code:`--auth-list-public-keys`(密码:`--auth-list-public-keys`)需要一个 " +"CSV 文件路径,该文件存储了所有已知节点的公钥。您需要在一个 CSV 文件(:code:`.csv`)中存储所有允许参与联盟的已知节点公钥。" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/how-to-authenticate-supernodes.rst:42 +#, fuzzy msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " -"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" +"存储已知节点公开密钥的有效 CSV 文件应以 OpenSSH " +"格式列出密钥,以逗号分隔,不含任何注释。有关示例,请参阅我们的代码示例,其中包含一个包含两个已知节点公钥的 CSV 文件。" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-authenticate-supernodes.rst:44 +#, fuzzy msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" -msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" +"The second and third flags :code:`--auth-superlink-private-key` and :code" +":`--auth-superlink-public-key` expect paths to the server's private and " +"public keys. For development purposes, you can generate a private and " +"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +msgstr "" +"第二和第三个标记 :code:`--auth-superlink-private-key` 和 :code:`--auth-superlink-" +"public-key` 希望指向服务器私钥和公钥的路径。出于开发目的,您可以使用 :code:`ssh-keygen -t ecdsa -b " +"384` 生成一对私钥和公钥。" -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/how-to-authenticate-supernodes.rst:47 +#, fuzzy msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" +"在 Flower 1.9 中,超级链接不支持动态删除、编辑或添加已知节点公钥。要更改已知节点集,您需要关闭服务器,编辑 CSV " +"文件,然后重新启动服务器。动态更改已知节点集的支持已列入 Flower 1.10(预计发布时间:6 月)的路线图。" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/how-to-authenticate-supernodes.rst:53 +#, fuzzy +msgid "Enable node authentication in :code:`SuperNode`" +msgstr "在 :code:`SuperNode` 中启用节点验证" + +#: ../../source/how-to-authenticate-supernodes.rst:55 +#, fuzzy msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." 
+"Similar to the long-running Flower server (:code:`SuperLink`), you can " +"easily enable node authentication in the long-running Flower client " +"(:code:`SuperNode`). Use the following terminal command to start an " +"authenticated :code:`SuperNode`:" msgstr "" -"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " -":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" +"与长期运行的 Flower 服务器(:code:`SuperLink`)类似,您也可以在长期运行的 Flower " +"客户端(:code:`SuperNode`)中轻松启用节点身份验证。使用以下终端命令启动已验证的 :code:`SuperNode`:" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/how-to-authenticate-supernodes.rst:66 +#, fuzzy msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"The :code:`--auth-supernode-private-key` flag expects a path to the " +"node's private key file and the :code:`--auth-supernode-public-key` flag " +"expects a path to the node's public key file. For development purposes, " +"you can generate a private and public key pair using :code:`ssh-keygen -t" +" ecdsa -b 384`." msgstr "" -"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" -" VRAM 超过启动模拟时指定的比例。" +":code:`--auth-supernode-private-key`标志需要节点私钥文件的路径,:code:`-auth-supernode-" +"public-key`标志需要节点公钥文件的路径。出于开发目的,可以使用 :code:`ssh-keygen -t ecdsa -b 384` " +"生成一对私钥和公钥。" -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" -msgstr "使用 GPU 的 TensorFlow" +#: ../../source/how-to-authenticate-supernodes.rst:70 +#, fuzzy +msgid "Security notice" +msgstr "安全通知" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-authenticate-supernodes.rst:72 +#, fuzzy msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" -" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " -"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " -"`_来禁用这一默认行为。" +"系统的安全性依赖于超级链接和每个超级节点的凭证。因此,必须保护和安全存储凭证,以避免公钥基础设施 (PKI) " +"假冒攻击等安全风险。节点验证机制还涉及人机交互,因此请确保使用可信的通信方法,以安全的方式进行所有通信。" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-authenticate-supernodes.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:68 +#: ../../source/how-to-use-built-in-mods.rst:85 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "总结" + +#: ../../source/how-to-authenticate-supernodes.rst:79 +#, fuzzy msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. 
By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"You should now have learned how to start a long-running Flower server " +"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " +"authentication enabled. You should also know the significance of the " +"private key and store it safely to minimize security risks." msgstr "" -"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " -":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" -" TF 工作负载的 GPU 增长,它看起来如下:" +"现在,您应该已经学会了如何启动长期运行的 Flower " +"服务器(:code:`SuperLink`)和客户端(:code:`SuperNode`)并启用节点身份验证。您还应该知道私钥的重要性,并将其安全存储,以尽量减少安全风险。" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" +msgstr "配置客户端" + +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." -msgstr "" -"这正是 \"Tensorflow/Keras 模拟 " -"`_\"示例中使用的机制。" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." +msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" -msgstr "多节点设置" +#: ../../source/how-to-configure-clients.rst:7 +msgid "Configuration values" +msgstr "配置值" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-configure-clients.rst:9 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" msgstr "" -"VCE 目前不提供控制特定 \"虚拟 " -"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " -"FL " -"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" -" nfs 或数据库)来避免数据重复。" +"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " +"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-configure-clients.rst:20 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. 
This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " -"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" -" \"状态\"。" +"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " +"将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" -msgstr "保存和加载模型检查点" +#: ../../source/how-to-configure-clients.rst:24 +msgid "" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." +msgstr "" +"目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " +"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/how-to-configure-clients.rst:26 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." -msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." +msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" -msgstr "模型检查点" +#: ../../source/how-to-configure-clients.rst:30 +msgid "Configuration through built-in strategies" +msgstr "通过内置策略进行配置" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/how-to-configure-clients.rst:32 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" +"called configuration functions. A configuration function is a function " +"that the built-in strategy calls to get the configuration dictionary for " +"the current round. It then forwards the configuration dictionary to all " +"the clients selected during that round." 
msgstr "" -"模型更新可通过自定义 :code:`Strategy` " -"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " -":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " -"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " -":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" -msgstr "保存和加载 PyTorch 检查点" +"向客户端发送配置值的最简单方法是使用内置策略,如 " +":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-configure-clients.rst:34 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." -msgstr "" -"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " -"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " -"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" +msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-configure-clients.rst:47 msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" -msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +":code:`on_fit_config_fn`:" +msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:56 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" + +#: ../../source/how-to-configure-clients.rst:67 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -"在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " -"``initial_parameters` 中。" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "升级至 Flower 1.0" +"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " +"发送不同的配置值(例如,使用不同的批量大小)。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-configure-clients.rst:69 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. 
Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " -"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" +"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " +"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " +"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "安装更新" +#: ../../source/how-to-configure-clients.rst:82 +msgid "The :code:`FedAvg` strategy will call this function *every round*." +msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 -msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" -msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" +#: ../../source/how-to-configure-clients.rst:85 +msgid "Configuring individual clients" +msgstr "配置个别客户端" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "pip: 安装时添加 ``-U``." +#: ../../source/how-to-configure-clients.rst:87 +msgid "" +"In some cases, it is necessary to send different configuration values to " +"different clients." +msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-configure-clients.rst:89 +#, fuzzy msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" -msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" +" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" +" to the config dict of a *single client* (only the first client in the " +"list, the other clients in this round to not receive this \"special\" " +"config value):" +msgstr "" +"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " +"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " +"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" +msgstr "配置日志记录" + +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" -msgstr "``python -m pip install -U 'flwr[simulation]'``(当使用`start_simulation``时)" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. 
It presents information by default " +"following a standard message format:" +msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-configure-logging.rst:13 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." +"containing relevant information including: log message level (e.g. " +":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " +"took place from, as well as the log message itself. In this way, the " +"logger would typically display information on your terminal as follows:" msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" +"相关信息包括:日志信息级别(例如 " +":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" -msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" +#: ../../source/how-to-configure-logging.rst:34 +msgid "Saving log to file" +msgstr "将日志保存到文件" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-configure-logging.rst:36 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do :code:`fl.server.start_server`) and when " +"using the :code:`VirtualClientEngine` (i.e. when you do " +":code:`fl.simulation.start_simulation`). In some situations you might " +"want to save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " -"}``(当使用``start_simulation``时)" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "所需变更" +"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " +":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " +"时(即执行 :code:`fl.simulation.start_simulation` " +"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " +"`_" +" 函数。例如:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." -msgstr "以下更改需要手动更新。" +#: ../../source/how-to-configure-logging.rst:53 +msgid "" +"With the above, Flower will record the log you see on your terminal to " +":code:`log.txt`. This file will be created in the same directory as were " +"you are running the code from. If we inspect we see the log above is also" +" recorded but prefixing with :code:`identifier` each line:" +msgstr "" +"通过上述操作,Flower 会将您在终端上看到的日志记录到 " +":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " +":code:`identifier` 作为前缀:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "一般情况" +#: ../../source/how-to-configure-logging.rst:74 +msgid "Log your own messages" +msgstr "记录自己的信息" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-configure-logging.rst:76 msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). 
" -"Here's an example:" -msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." +msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-configure-logging.rst:102 msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" -msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." +msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-configure-logging.rst:128 +msgid "Log to a remote service" +msgstr "登录远程服务" + +#: ../../source/how-to-configure-logging.rst:130 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"The :code:`fl.common.logger.configure` function, also allows specifying a" +" host to which logs can be pushed (via :code:`POST`) through a native " +"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" +" feature in :code:`gRPC`-based Federated Learning workloads where " +"otherwise gathering logs from all entities (i.e. the server and the " +"clients) might be cumbersome. Note that in Flower simulation, the server " +"automatically displays all logs. You can still specify a " +":code:`HTTPHandler` should you wish to backup or analyze the logs " +"somewhere else." msgstr "" -"Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " +":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " +":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " +"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "客户端" +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" +msgstr "启用 SSL 连接" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-enable-ssl-connections.rst:4 +#, fuzzy msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" -msgstr "" -"NumPyClient的子类:将``def get_parameters(self):```改为``def " -"get_parameters(self,config):``" +"This guide describes how to a SSL-enabled secure Flower server " +"(:code:`SuperLink`) can be started and how a Flower client " +"(:code:`SuperNode`) can establish a secure connections to it." +msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-enable-ssl-connections.rst:7 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"A complete code example demonstrating a secure connection can be found " +"`here `_." 
msgstr "" -"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " -"GetParametersIns):\"" +"有关安全连接的完整代码示例,请参见 `_ 。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "策略 / ``start_server`` / ``start_simulation``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-enable-ssl-connections.rst:10 +#, fuzzy msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" -msgstr "" -"向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " -"dictionary)。下面是一个例子:" +"The code example comes with a :code:`README.md` file which explains how " +"to start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." +msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "证书" + +#: ../../source/how-to-enable-ssl-connections.rst:18 +#, fuzzy msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in :code:`examples/advanced-" +"tensorflow/certificates/generate.sh` with the following command sequence:" msgstr "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " +":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-enable-ssl-connections.rst:29 msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"This will generate the certificates in :code:`examples/advanced-" +"tensorflow/.cache/certificates`." +msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-enable-ssl-connections.rst:31 +#, fuzzy msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" -msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." 
+msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-enable-ssl-connections.rst:39 +#, fuzzy +msgid "Server (SuperLink)" +msgstr "flower-superlink" + +#: ../../source/how-to-enable-ssl-connections.rst:41 +#, fuzzy msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." -msgstr "" -"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " -"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "重命名参数/数组转换函数:" +#: ../../source/how-to-enable-ssl-connections.rst:50 +#, fuzzy +msgid "" +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." +msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/how-to-enable-ssl-connections.rst:54 +#, fuzzy +msgid "Client (SuperNode)" +msgstr "客户端状态代码。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/how-to-enable-ssl-connections.rst:56 +#, fuzzy +msgid "" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-enable-ssl-connections.rst:64 +#, fuzzy msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"When setting :code:`root_certificates`, the client expects a file path to" +" PEM-encoded root certificates." msgstr "" -"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " -"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " -"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " -"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" +"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " +":code:`Path` 来简化以字节字符串形式读取证书的过程。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "重命名内置策略参数(例如,`FedAvg``):" +#: ../../source/how-to-enable-ssl-connections.rst:70 +#, fuzzy +msgid "" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." 
+msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" +#: ../../source/how-to-enable-ssl-connections.rst:75 +msgid "Additional resources" +msgstr "补充资源" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" +#: ../../source/how-to-enable-ssl-connections.rst:77 +msgid "" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" +msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +#: ../../source/how-to-enable-ssl-connections.rst:79 +msgid "`Let's Encrypt `_" +msgstr "`让我们加密 `_" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 -msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." -msgstr "" -"将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " -"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" -" 和 ``evaluate_fn``。" +#: ../../source/how-to-enable-ssl-connections.rst:80 +msgid "`certbot `_" +msgstr "`certbot `_" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" +msgstr "实施策略" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower" +" 提供了一些内置策略,这些策略基于下文所述的相同 API。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-implement-strategies.rst:11 +msgid "The :code:`Strategy` abstraction" +msgstr ":code:`策略 ` 抽象类" + +#: ../../source/how-to-implement-strategies.rst:13 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"All strategy implementation are derived from the abstract base class " +":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." 
msgstr "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "定制策略" +"所有策略实现均源自抽象基类 " +":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" -msgstr "" -"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," -" FitRes], " -"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " -"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" +msgstr "策略抽象定义了一些需要实现的抽象方法:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-implement-strategies.rst:75 msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" -msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" +"Creating a new strategy means implementing a new :code:`class` (derived " +"from the abstract base class :code:`Strategy`) that implements for the " +"previously shown abstract methods:" +msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-implement-strategies.rst:100 +msgid "The Flower server calls these methods in the following order:" +msgstr "Flower 服务器按以下顺序调用这些方法:" + +#: ../../source/how-to-implement-strategies.rst:177 +msgid "The following sections describe each of those methods in more detail." +msgstr "下文将详细介绍每种方法。" + +#: ../../source/how-to-implement-strategies.rst:180 +msgid "The :code:`initialize_parameters` method" +msgstr ":code:`初始化参数` 方法" + +#: ../../source/how-to-implement-strategies.rst:182 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +":code:`initialize_parameters` is called only once, at the very beginning " +"of an execution. It is responsible for providing the initial global model" +" parameters in a serialized form (i.e., as a :code:`Parameters` object)." msgstr "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:```" +":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " +":code:`Parameters` 对象)提供初始全局模型参数。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-implement-strategies.rst:184 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "可选的改进措施" +"Built-in strategies return user-provided initial parameters. 
The " +"following example shows how initial parameters can be passed to " +":code:`FedAvg`:" +msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-implement-strategies.rst:209 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" -msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" +"The Flower server will call :code:`initialize_parameters`, which either " +"returns the parameters that were passed to :code:`initial_parameters`, or" +" :code:`None`. If no parameters are returned from " +":code:`initialize_parameters` (i.e., :code:`None`), the server will " +"randomly select one client and ask it to provide its parameters. This is " +"a convenience feature and not recommended in practice, but it can be " +"useful for prototyping. In practice, it is recommended to always use " +"server-side parameter initialization." +msgstr "" +"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " +":code:`initial_parameters` 的参数或 :code:`None`。如果 " +":code:`initialize_parameters` 没有返回任何参数(即 " +":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-implement-strategies.rst:213 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." -msgstr "" -"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " -"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." +msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-implement-strategies.rst:216 +msgid "The :code:`configure_fit` method" +msgstr ":code:`configure_fit`方法" + +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +":code:`configure_fit` is responsible for configuring the upcoming round " +"of training. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of :code:`configure_fit` makes this clear:" msgstr "" -"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +":code:`configure_fit` " +"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" +" 说明了这一点:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "更多帮助" +#: ../../source/how-to-implement-strategies.rst:231 +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. 
Strategy implementations " +"usually perform the following steps in :code:`configure_fit`:" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-implement-strategies.rst:233 +#: ../../source/how-to-implement-strategies.rst:280 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"Use the :code:`client_manager` to randomly sample all (or a subset of) " +"available clients (each represented as a :code:`ClientProxy` object)" msgstr "" -"大多数官方的 `Flower 代码示例 `_" -" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " -"`_ 并使用 \"#questions``\"。" +"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " +"对象)" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -#, fuzzy -msgid "Upgrade to Flower Next" -msgstr "升级至 Flower 1.0" +#: ../../source/how-to-implement-strategies.rst:234 +msgid "" +"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " +"current global model :code:`parameters` and :code:`config` dict" +msgstr "" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`FitIns` 配对" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-implement-strategies.rst:236 #, fuzzy msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +"More sophisticated implementations can use :code:`configure_fit` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_fit`." msgstr "" -"欢迎阅读从 Flower 升级到 Flower Next 的迁移指南!无论您是经验丰富的用户还是刚刚开始使用 " -"Flower,本指南都将帮助您顺利过渡现有设置,以利用 Flower Next 从 1.8 版开始的最新功能和改进。" +"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:240 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." msgstr "" -"本指南展示了如何通过使用 Flower Next 中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " -"代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行 Flower Next。" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" +" :code:`config` dict)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -#, fuzzy -msgid "Let's dive in!" -msgstr "让我们深入了解一下!" 
+#: ../../source/how-to-implement-strategies.rst:243 +msgid "The :code:`aggregate_fit` method" +msgstr ":code:`aggregate_fit` 方法" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:245 msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" -msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -#, fuzzy -msgid "or if you need Flower Next with simulation:" -msgstr "启动 Flower 模拟" +":code:`aggregate_fit` is responsible for aggregating the results returned" +" by the clients that were selected and asked to train in " +":code:`configure_fit`." +msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:258 msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" -msgstr "确保在 ``requirements.txt`` 中设置了以下版本限制" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " +"of :code:`results`, but also a list of :code:`failures`." +msgstr "" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " +":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -#, fuzzy -msgid "or ``pyproject.toml``:" -msgstr "或 ``pyproject.toml```:" +#: ../../source/how-to-implement-strategies.rst:260 +msgid "" +":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" +" dictionary of aggregated metrics. The :code:`Parameters` return value is" +" optional because :code:`aggregate_fit` might decide that the results " +"provided are not sufficient for aggregation (e.g., too many failures)." +msgstr "" +":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " +"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " +"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -#, fuzzy -msgid "Using Poetry" -msgstr "使用 pip" +#: ../../source/how-to-implement-strategies.rst:263 +msgid "The :code:`configure_evaluate` method" +msgstr ":code:`configure_evaluate`方法" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:265 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +":code:`configure_evaluate` is responsible for configuring the upcoming " +"round of evaluation. What does *configure* mean in this context? " +"Configuring a round means selecting clients and deciding what " +"instructions to send to these clients. 
The signature of " +":code:`configure_evaluate` makes this clear:" msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" +":code:`configure_evaluate` " +"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" +" 说明了这一点:" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:278 msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in :code:`configure_evaluate`:" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:281 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " +"the current global model :code:`parameters` and :code:`config` dict" msgstr "" -"在 Flower Next " -"中,*基础架构层*和*应用层*已经解耦。你不再需要在代码中通过``start_client()``启动客户端,而是创建一个|clientapp_link|_,然后通过命令行启动它。无需通过``start_server()``在代码中启动服务器,而是创建一个" -" |serverapp_link|_ " -"并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和" -" Flower Next 方式运行项目:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -#, fuzzy -msgid "|clientapp_link|_" -msgstr "客户端" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`EvaluateIns` 配对" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-implement-strategies.rst:283 #, fuzzy msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" -msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:" +"More sophisticated implementations can use :code:`configure_evaluate` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding :code:`ClientProxy` is included in the " +"list returned from :code:`configure_evaluate`." +msgstr "" +"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 -#, fuzzy -msgid "|serverapp_link|_" -msgstr "服务器" - -#: ../../source/how-to-upgrade-to-flower-next.rst:133 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:287 msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. 
Here's an example:" -msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the :code:`config` " +"dict)." +msgstr "" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" +" :code:`config` dict)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -#, fuzzy -msgid "Deployment" -msgstr "调配" +#: ../../source/how-to-implement-strategies.rst:291 +msgid "The :code:`aggregate_evaluate` method" +msgstr ":code:`aggregate_evaluate` 方法" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:293 msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +":code:`aggregate_evaluate` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +":code:`configure_evaluate`." msgstr "" -"在依次运行 |flowernext_clientapp_link|_ (2x) 和 |flowernext_serverapp_link|_ " -"之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 " -"`server.py` 作为 Python 脚本执行。" +":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " +"中选择并要求评估的客户端返回的结果。" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:306 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" -msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " +"receives a list of :code:`results`, but also a list of :code:`failures`." +msgstr "" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " +":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " +":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:308 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." -msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。" +":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" +" dictionary of aggregated metrics. The :code:`float` return value is " +"optional because :code:`aggregate_evaluate` might decide that the results" +" provided are not sufficient for aggregation (e.g., too many failures)." 
+msgstr "" +":code:`aggregate_evaluate` 返回一个可选的 " +":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " +":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -#, fuzzy -msgid "Simulation in CLI" -msgstr "运行模拟" +#: ../../source/how-to-implement-strategies.rst:311 +msgid "The :code:`evaluate` method" +msgstr ":code:`evaluate`方法" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:313 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +":code:`evaluate` is responsible for evaluating model parameters on the " +"server-side. Having :code:`evaluate` in addition to " +":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " +"to perform both servers-side and client-side (federated) evaluation." msgstr "" -"分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 " -"|startsim_link|_。下面是一个示例:" +":code:`evaluate` 负责在服务器端评估模型参数。除了 " +":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" +" 可以使策略同时执行服务器端和客户端(联邦)评估。" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:323 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +":code:`evaluate` method might not complete successfully (e.g., it might " +"fail to load the server-side evaluation data)." msgstr "" -"在 CLI 中运行 |flower_simulation_link|_ 并指向代码中的 ``server_app`` " -"/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 " -"`client_app`` 对象位于 `sim.py`` 模块中):" +"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " +"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 -#, fuzzy -msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" -msgstr "" -"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " -"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" +msgstr "安装Flower" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -#, fuzzy -msgid "Simulation in a Notebook" -msgstr "笔记本中的模拟" +#: ../../source/how-to-install-flower.rst:6 +msgid "Python version" +msgstr "Python 版本" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 -#, fuzzy -msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" -msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:" +#: ../../source/how-to-install-flower.rst:12 +msgid "Install stable release" +msgstr "安装稳定版" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-install-flower.rst:15 +#: ../../source/how-to-upgrade-to-flower-next.rst:46 #, fuzzy -msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. 
If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." -msgstr "" -"大多数官方的 `Flower 代码示例 `_" -" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " -"`_ 并使用 \"#questions``\"。" +msgid "Using pip" +msgstr "使用 pip" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -#, fuzzy -msgid "Important" -msgstr "重要变更:" +#: ../../source/how-to-install-flower.rst:17 +msgid "" +"Stable releases are available on `PyPI " +"`_::" +msgstr "稳定版本可在 `PyPI `_::" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 -#, fuzzy +#: ../../source/how-to-install-flower.rst:21 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" -msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra::" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 +#: ../../source/how-to-install-flower.rst:27 #, fuzzy -msgid "Happy migrating! 🚀" -msgstr "移民愉快!🚀" +msgid "Using conda (or mamba)" +msgstr "使用 conda(或 mamba)" -#: ../../source/how-to-use-built-in-mods.rst:2 +#: ../../source/how-to-install-flower.rst:29 #, fuzzy -msgid "Use Built-in Mods" -msgstr "使用内置调制器" +msgid "Flower can also be installed from the ``conda-forge`` channel." +msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-install-flower.rst:31 #, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" -msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following::" +msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-install-flower.rst:36 #, fuzzy msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." -msgstr "" -"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp``" -" 处理任务之前和之后执行操作。" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``::" +msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" -#: ../../source/how-to-use-built-in-mods.rst:9 +#: ../../source/how-to-install-flower.rst:40 #, fuzzy -msgid "What are Mods?" -msgstr "什么是 Mods?" +msgid "or with ``mamba``::" +msgstr "或用 ``mamba`` ::" -#: ../../source/how-to-use-built-in-mods.rst:11 -#, fuzzy -msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. 
The signature for a ``Mod`` is as follows:" -msgstr "" -"Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 ``Message`` 和由此产生的传出的 " -"``Message`` 。一个 ``Mod`` 的签名如下:" +#: ../../source/how-to-install-flower.rst:46 +msgid "Verify installation" +msgstr "验证安装" -#: ../../source/how-to-use-built-in-mods.rst:18 +#: ../../source/how-to-install-flower.rst:48 #, fuzzy -msgid "A typical mod function might look something like this:" -msgstr "一个典型的修改函数可能是这样的:" +msgid "" +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" +msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" -#: ../../source/how-to-use-built-in-mods.rst:31 -#, fuzzy -msgid "Using Mods" -msgstr "使用修改器" +#: ../../source/how-to-install-flower.rst:58 +msgid "Advanced installation options" +msgstr "高级安装选项" -#: ../../source/how-to-use-built-in-mods.rst:33 +#: ../../source/how-to-install-flower.rst:61 #, fuzzy -msgid "To use mods in your ``ClientApp``, you can follow these steps:" -msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:" +msgid "Install via Docker" +msgstr "安装Flower" -#: ../../source/how-to-use-built-in-mods.rst:36 +#: ../../source/how-to-install-flower.rst:63 #, fuzzy -msgid "1. Import the required mods" -msgstr "1. 导入所需修改" +msgid ":doc:`Run Flower using Docker `" +msgstr "" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/how-to-use-built-in-mods.rst:38 -#, fuzzy -msgid "First, import the built-in mod you intend to use:" -msgstr "首先,导入您打算使用的内置模式:" +#: ../../source/how-to-install-flower.rst:66 +msgid "Install pre-release" +msgstr "安装预发布版本" -#: ../../source/how-to-use-built-in-mods.rst:46 -#, fuzzy -msgid "2. Define your client function" -msgstr "2. 定义客户功能" - -#: ../../source/how-to-use-built-in-mods.rst:48 -#, fuzzy +#: ../../source/how-to-install-flower.rst:68 msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "定义将被 mod 封装的客户端函数(``client_fn``):" - -#: ../../source/how-to-use-built-in-mods.rst:57 -#, fuzzy -msgid "3. Create the ``ClientApp`` with mods" -msgstr "3. 用模块创建 ``ClientApp``" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens::" +msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" -#: ../../source/how-to-use-built-in-mods.rst:59 -#, fuzzy +#: ../../source/how-to-install-flower.rst:72 msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. 
The order in which you provide the mods matters:" -msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺序很重要:" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra::" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" -#: ../../source/how-to-use-built-in-mods.rst:72 -#, fuzzy -msgid "Order of execution" -msgstr "停用" +#: ../../source/how-to-install-flower.rst:77 +msgid "Install nightly release" +msgstr "安装隔夜版本" -#: ../../source/how-to-use-built-in-mods.rst:74 -#, fuzzy +#: ../../source/how-to-install-flower.rst:79 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases::" +msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" -#: ../../source/how-to-use-built-in-mods.rst:76 -#, fuzzy -msgid "``example_mod_1`` (outermost mod)" -msgstr "``example_mod_1`` (最外层模块)" +#: ../../source/how-to-install-flower.rst:83 +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra::" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" -#: ../../source/how-to-use-built-in-mods.rst:77 -#, fuzzy -msgid "``example_mod_2`` (next mod)" -msgstr "示例模式 2(下一个模式)" +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" +msgstr "监控模拟" -#: ../../source/how-to-use-built-in-mods.rst:78 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" -msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." +msgstr "" +"Flower 允许您在运行模拟时监控系统资源。此外,Flower " +"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" -#: ../../source/how-to-use-built-in-mods.rst:79 -#, fuzzy -msgid "``example_mod_2`` (on the way back)" -msgstr "``example_mod_2`` (返回途中)" +#: ../../source/how-to-monitor-simulation.rst:6 +msgid "" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." +msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" -#: ../../source/how-to-use-built-in-mods.rst:80 -#, fuzzy -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "``example_mod_1`` (返回途中最外层的模式)" +#: ../../source/how-to-monitor-simulation.rst:10 +msgid "Downloads" +msgstr "下载" -#: ../../source/how-to-use-built-in-mods.rst:82 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:16 msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." -msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样,也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." 
+msgstr "" +"`Prometheus `_ 用于收集数据,而 `Grafana " +"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " +"`_ 紧密集成。" -#: ../../source/how-to-use-built-in-mods.rst:87 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:18 msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." -msgstr "" -"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记住,mod " -"的顺序至关重要,它会影响输入和输出的处理方式。" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." +msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" -#: ../../source/how-to-use-built-in-mods.rst:89 -#, fuzzy -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" -msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" +#: ../../source/how-to-monitor-simulation.rst:20 +msgid "If you are on an M1 Mac, it should be:" +msgstr "如果你使用的是 M1 Mac,应该是这样:" -#: ../../source/how-to-use-differential-privacy.rst:2 -#, fuzzy -msgid "Use Differential Privacy" -msgstr "差分隐私" +#: ../../source/how-to-monitor-simulation.rst:27 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "在上一代英特尔 Mac 设备上,应该是这样:" -#: ../../source/how-to-use-differential-privacy.rst:3 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:34 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." -msgstr "" -"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参考 :doc:`explanation-" -"differential-privacy` 。" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" +msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" -#: ../../source/how-to-use-differential-privacy.rst:7 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:44 msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." -msgstr "" -"Flower " -"中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能,请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" +msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" -#: ../../source/how-to-use-differential-privacy.rst:12 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:59 msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." -msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. 
Open those using one of the following " +"commands as before:" +msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" -#: ../../source/how-to-use-differential-privacy.rst:15 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:69 msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." -msgstr "" -"** 服务器端剪切**: " -"这种方法的优点是服务器可对所有客户端的更新执行统一的剪切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行剪切操作,从而增加了服务器的计算负荷。" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" -#: ../../source/how-to-use-differential-privacy.rst:16 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:84 msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." -msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集中控制的缺点,因为服务器对剪切过程的控制较少。" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." +msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" -#: ../../source/how-to-use-differential-privacy.rst:21 -#, fuzzy -msgid "Server-side Clipping" -msgstr "服务器端逻辑" +#: ../../source/how-to-monitor-simulation.rst:88 +msgid "Tracking metrics" +msgstr "跟踪指标" -#: ../../source/how-to-use-differential-privacy.rst:22 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:90 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." -msgstr "" -"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:`Strategy` 实例(例如 " -":code:`FedAvg`)的包装器。这两个封装类分别是 " -":code:`DifferentialPrivacyServerSideFixedClipping` 和 " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" - -#: ../../source/how-to-use-differential-privacy.rst:-1 -#, fuzzy -msgid "server side clipping" -msgstr "服务器端逻辑" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" -#: ../../source/how-to-use-differential-privacy.rst:31 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:97 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." -msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类使 " -":code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方法也可用于 " -":code:`DifferentialPrivacyServerSideAdaptiveClipping`。" +"Please include the following argument in your Python code when starting a" +" simulation." 
+msgstr "开始模拟时,请在 Python 代码中加入以下参数。" -#: ../../source/how-to-use-differential-privacy.rst:52 -#, fuzzy -msgid "Client-side Clipping" -msgstr "客户端逻辑" +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "Now, you are ready to start your workload." +msgstr "现在,您可以开始工作了。" -#: ../../source/how-to-use-differential-privacy.rst:53 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:110 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." -msgstr "" -"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客户端可以使用现有的 Flower " -":code:`Mods`来执行剪裁。有两种模式可用于固定和自适应客户端剪辑::code:`fixedclipping_mod` 和 " -":code:`adaptiveclipping_mod`,以及相应的服务器端封装 " -":code:`DifferentialPrivacyClientSideFixedClipping` 和 " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" +msgstr "模拟启动后不久,您就会在终端中看到以下日志:" -#: ../../source/how-to-use-differential-privacy.rst:-1 -#, fuzzy -msgid "client side clipping" -msgstr "客户端逻辑" +#: ../../source/how-to-monitor-simulation.rst:117 +msgid "You can look at everything at ``_ ." +msgstr "您可以在 ``_ 查看所有内容。" -#: ../../source/how-to-use-differential-privacy.rst:63 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:119 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" -msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类和客户端的 " -":code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的情况下使用差分隐私:" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." +msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" -#: ../../source/how-to-use-differential-privacy.rst:80 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" -"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " -"以执行客户端剪切:" +"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " +"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " +"Grafana。" -#: ../../source/how-to-use-differential-privacy.rst:97 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:123 msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." 
-msgstr "" -"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加噪声,可以使用 " -"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port :code:`3000` on " +"your machine as long as they are running." +msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" -#: ../../source/how-to-use-differential-privacy.rst:-1 -#, fuzzy -msgid "local DP mod" -msgstr "本地 DP 模式" +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "Resource allocation" +msgstr "资源分配" -#: ../../source/how-to-use-differential-privacy.rst:104 -#, fuzzy -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" -msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" +#: ../../source/how-to-monitor-simulation.rst:134 +msgid "" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." +msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" -#: ../../source/how-to-use-differential-privacy.rst:122 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:136 msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." -msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" +msgstr "" +"最初,模拟(由 Ray " +"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" -#: ../../source/how-to-use-differential-privacy.rst:125 -#, fuzzy -msgid "Local Training using Privacy Engines" -msgstr "使用隐私引擎进行本地培训" +#: ../../source/how-to-monitor-simulation.rst:143 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "在 Google Colab 中,您看到的结果可能与此类似:" -#: ../../source/how-to-use-differential-privacy.rst:126 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:155 msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." -msgstr "" -"要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 TensorFlow Privacy 等隐私引擎。有关将 Flower" -" 与这些引擎结合使用的示例,请参阅 Flower 示例目录(`Opacus " -"`_, `Tensorflow" -" Privacy `_)。" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" +msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "使用策略" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "Let’s also specify the resource for a single client." 
+msgstr "我们还可以为单个客户指定资源。" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-monitor-simulation.rst:205 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." -msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." +msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-monitor-simulation.rst:207 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" -msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " +"running two clients and therefore enable them to run concurrently. Be " +"careful not to require more resources than available. If you specified " +":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " +"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +msgstr "" +"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " +"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " +"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "使用现有策略,例如 :code:`FedAvg`" +#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "常见问题" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "使用回调函数定制现有策略" +#: ../../source/how-to-monitor-simulation.rst:214 +msgid "Q: I don't see any metrics logged." +msgstr "问:我没有看到任何指标记录。" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "实施新策略" +#: ../../source/how-to-monitor-simulation.rst:216 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." +msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "使用现有策略" +#: ../../source/how-to-monitor-simulation.rst:218 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." +msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-monitor-simulation.rst:220 msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" -msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" +"A: You probably don't have Grafana running. 
Please check the running " +"services" +msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-monitor-simulation.rst:226 msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" -msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" +"Q: I see \"This site can't be reached\" when going to " +"``_." +msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-monitor-simulation.rst:228 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." -msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." +msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "配置客户匹配和客户评估" +#: ../../source/how-to-monitor-simulation.rst:232 +msgid "Resources" +msgstr "资源" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-monitor-simulation.rst:234 +#, fuzzy msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"Ray Dashboard: ``_" +msgstr "Ray 仪表盘: ``_" + +#: ../../source/how-to-monitor-simulation.rst:236 +#, fuzzy +msgid "Ray Metrics: ``_" msgstr "" -"服务器可以通过向 :code:`on_fit_config_fn` " -"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" -" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" +"Ray 指标: ``_" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "运行模拟" + +#: ../../source/how-to-run-simulations.rst:8 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." 
msgstr "" -":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " -":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" +"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" +" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " +"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-run-simulations.rst:10 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" +" clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +":code:`VirtualClientEngine` are:" msgstr "" -"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " -":code:`client.evaluate()` 的配置" +":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" +" `_启动的客户端),因为它们可以通过创建一个继承自 " +"`flwr.client.NumPyClient `_ " +"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "配置服务器端评估" +#: ../../source/how-to-run-simulations.rst:12 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." +msgstr "" +"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " +"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-run-simulations.rst:13 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." -msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +"internals." +msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-run-simulations.rst:14 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." 
msgstr "" -"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " -"指南,了解更多信息。" - -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "教程" - -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "快速入门教程" - -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "操作指南" - -#: ../../source/index.rst:99 -msgid "Legacy example guides" -msgstr "旧版指南范例" +"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " +")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "说明" +#: ../../source/how-to-run-simulations.rst:16 +msgid "" +"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " +"of `Actors `_ to " +"spawn `virtual` clients and run their workload." +msgstr "" +":code:`VirtualClientEngine`使用`Ray " +"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" +" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" -#: None:-1 -msgid "API reference" -msgstr "应用程序接口参考" +#: ../../source/how-to-run-simulations.rst:20 +msgid "Launch your Flower simulation" +msgstr "启动 Flower 模拟" -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "参考文档" +#: ../../source/how-to-run-simulations.rst:22 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" +msgstr "" +"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " +"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" -msgstr "贡献者教程" +#: ../../source/how-to-run-simulations.rst:44 +msgid "VirtualClientEngine resources" +msgstr "虚拟客户端引擎资源" -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" -msgstr "投稿指南" +#: ../../source/how-to-run-simulations.rst:45 +msgid "" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +":code:`ray_init_args` input argument to :code:`start_simulation` which " +"the VCE internally passes to Ray's :code:`ray.init` command. For a " +"complete list of settings you can configure check the `ray.init " +"`_" +" documentation. Do not set :code:`ray_init_args` if you want the VCE to " +"use all your system's CPUs and GPUs." +msgstr "" +"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " +"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " +":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" +" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " +"GPU,请不要设置 :code:`ray_init_args`。" -#: ../../source/index.rst:172 -msgid "Contributor explanations" -msgstr "贡献者解释" +#: ../../source/how-to-run-simulations.rst:62 +msgid "Assigning client resources" +msgstr "分配客户端资源" -#: ../../source/index.rst:178 -msgid "Contributor references" -msgstr "贡献者参考资料" +#: ../../source/how-to-run-simulations.rst:63 +msgid "" +"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" +" nothing else) to each virtual client. 
This means that if your system has" +" 10 cores, that many virtual clients can be concurrently running." +msgstr "" +"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " +"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" -#: ../../source/index.rst:-1 +#: ../../source/how-to-run-simulations.rst:65 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." -msgstr "查看主 Flower Framework 的文档,轻松实现联邦学习的 Python 开发。" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" +msgstr "" +"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " +"`client_resources` 设置为 `start_simulation `_ 。Ray " +"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" -msgstr "Flower 框架文档" +#: ../../source/how-to-run-simulations.rst:67 +msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" -#: ../../source/index.rst:7 +#: ../../source/how-to-run-simulations.rst:68 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." -msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" +":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " +"assigned." +msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "加入 Flower 社区" +#: ../../source/how-to-run-simulations.rst:70 +msgid "Let's see a few examples:" +msgstr "让我们来看几个例子:" -#: ../../source/index.rst:13 +#: ../../source/how-to-run-simulations.rst:89 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." -msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" +"While the :code:`client_resources` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +":code:`VirtualClientEngine` will schedule 100 jobs to run (each " +"simulating a client sampled by the strategy) and then will execute them " +"in a resource-aware manner in batches of 8." +msgstr "" +"虽然 :code:`client_resources` 可用来控制 FL " +"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " +"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " +"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " +"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "在 Slack 上加入我们" +#: ../../source/how-to-run-simulations.rst:91 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." 
+msgstr "" +"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " +"`_。" -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower 框架" +#: ../../source/how-to-run-simulations.rst:94 +msgid "Simulation examples" +msgstr "模拟示例" -#: ../../source/index.rst:25 +#: ../../source/how-to-run-simulations.rst:96 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" msgstr "" -"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " -"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" - -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "教程" +"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " +"`_ 中提供。您也可以在 Google Colab 上运行它们:" -#: ../../source/index.rst:32 +#: ../../source/how-to-run-simulations.rst:98 msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." -msgstr "以学习为导向的联邦学习教程系列,最好的起点。" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." +msgstr "" +"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" -#: ../../source/index.rst:61 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:99 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" -"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " -"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " -":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" -":`scikit-learn ` | :doc:`XGBoost " -"` | :doc:`Android ` | :doc:`iOS `" - -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" -msgstr "我们还为 PyTorch 制作了视频教程:" +"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" -#: ../../source/index.rst:68 -msgid "And TensorFlow:" -msgstr "还有 TensorFlow:" +#: ../../source/how-to-run-simulations.rst:104 +msgid "Multi-node Flower simulations" +msgstr "多节点 Flower 模拟" -#: ../../source/index.rst:76 +#: ../../source/how-to-run-simulations.rst:106 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." -msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" +"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " +"across multiple compute nodes. Before starting your multi-node simulation" +" ensure that you:" +msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" -#: ../../source/index.rst:110 -msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." -msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" +#: ../../source/how-to-run-simulations.rst:108 +msgid "Have the same Python environment in all nodes." +msgstr "所有节点都有相同的 Python 环境。" -#: ../../source/index.rst:120 -msgid "References" -msgstr "参考资料" +#: ../../source/how-to-run-simulations.rst:109 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
+msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "以信息为导向的 API 参考资料和其他参考资料。" +#: ../../source/how-to-run-simulations.rst:110 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" +msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" -#: ../../source/index.rst:131::1 -#, fuzzy -msgid ":py:obj:`flwr `\\" -msgstr ":py:obj:`flwr `\\" +#: ../../source/how-to-run-simulations.rst:111 +msgid "" +"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " +"`_ so the " +":code:`VirtualClientEngine` attaches to a running Ray instance." +msgstr "" +"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " +":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" -#: ../../source/index.rst:131::1 flwr:1 of -#, fuzzy -msgid "Flower main package." -msgstr "Flower 主包装。" +#: ../../source/how-to-run-simulations.rst:112 +msgid "" +"Start Ray on you head node: on the terminal type :code:`ray start " +"--head`. This command will print a few lines, one of which indicates how " +"to attach other nodes to the head node." +msgstr "" +"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" +"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" -#: ../../source/index.rst:148 -msgid "Contributor docs" -msgstr "贡献者文档" +#: ../../source/how-to-run-simulations.rst:113 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +":code:`ray start --address='192.168.1.132:6379'`" +msgstr "" +"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " +"--address='192.168.1.132:6379'`" -#: ../../source/index.rst:150 +#: ../../source/how-to-run-simulations.rst:115 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." -msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." +msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" -msgstr "Flower CLI 参考" +#: ../../source/how-to-run-simulations.rst:117 +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command :code:`ray stop` in each node's " +"terminal (including the head node)." +msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" -#: ../../source/ref-api-cli.rst:7 -#, fuzzy -msgid "flower-simulation" -msgstr "运行模拟" +#: ../../source/how-to-run-simulations.rst:120 +msgid "Multi-node simulation good-to-know" +msgstr "了解多节点模拟" -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower-superlink" +#: ../../source/how-to-run-simulations.rst:122 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" +msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" -#: ../../source/ref-api-cli.rst:27 -#, fuzzy -msgid "flower-client-app" -msgstr "Flower 客户端。" +#: ../../source/how-to-run-simulations.rst:124 +msgid "" +"User :code:`ray status` to check all nodes connected to your head node as" +" well as the total resources available to the " +":code:`VirtualClientEngine`." 
+msgstr "" +"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " +"可用的总资源。" -#: ../../source/ref-api-cli.rst:37 -#, fuzzy -msgid "flower-server-app" -msgstr "flower-driver-api" +#: ../../source/how-to-run-simulations.rst:126 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +":code:`VirtualClientEngine` can schedule as many `virtual` clients as " +"that node can possible run. In some settings you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"`--num-cpus=` and/or `--num-" +"gpus=` in any :code:`ray start` command (including " +"when starting the head)" +msgstr "" +"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" +" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" +" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" +"gpus=`" -#: ../../source/ref-api/flwr.rst:2 -#, fuzzy -msgid "flwr" -msgstr "Flower" +#: ../../source/how-to-run-simulations.rst:132 +msgid "Considerations for simulations" +msgstr "模拟的注意事项" -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 -#, fuzzy -msgid "Modules" -msgstr "模块" +#: ../../source/how-to-run-simulations.rst:135 +msgid "" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." +msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.client `\\" -msgstr ":py:obj:`flwr.client `\\" +#: ../../source/how-to-run-simulations.rst:138 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." +msgstr "" +"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" +" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " +"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." -msgstr "Flower 客户端。" +#: ../../source/how-to-run-simulations.rst:141 +msgid "GPU resources" +msgstr "GPU 资源" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.common `\\" -msgstr ":py:obj:`flwr.common `\\" +#: ../../source/how-to-run-simulations.rst:143 +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"internally by the VCE) is by default:" +msgstr "" +"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " +"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." -msgstr "服务器和客户端共享的通用组件。" +#: ../../source/how-to-run-simulations.rst:146 +msgid "" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set :code:`num_gpus=0.5` and you have two GPUs in your system with " +"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" +" concurrently." 
+msgstr "" +"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " +"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.server `\\" -msgstr ":py:obj:`flwr.server `\\" +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" +msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." -msgstr "Flower 服务器。" +#: ../../source/how-to-run-simulations.rst:149 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" +msgstr "" +"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.simulation `\\" -msgstr ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-run-simulations.rst:150 +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " +"experiment." +msgstr "" +"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " +":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -#, fuzzy -msgid "Flower simulation." -msgstr "运行模拟" +#: ../../source/how-to-run-simulations.rst:153 +msgid "" +"In addition, the GPU resource limits passed to :code:`client_resources` " +"are not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." +msgstr "" +"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" +" VRAM 超过启动模拟时指定的比例。" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "客户端" +#: ../../source/how-to-run-simulations.rst:156 +msgid "TensorFlow with GPUs" +msgstr "使用 GPU 的 TensorFlow" -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -#, fuzzy -msgid "Functions" -msgstr "四种函数:" +#: ../../source/how-to-run-simulations.rst:158 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." +msgstr "" +"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" +" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " +"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " +"`_来禁用这一默认行为。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_client_app `\\ \\(\\)" -msgstr ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:160 +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. 
By means of " +":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " +"in order to specify a function to be executed upon actor initialization. " +"In this case, to enable GPU growth for TF workloads. It would look as " +"follows:" +msgstr "" +"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " +":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" +" TF 工作负载的 GPU 增长,它看起来如下:" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -#, fuzzy -msgid "Run Flower client app." -msgstr "Flower 客户端。" +#: ../../source/how-to-run-simulations.rst:179 +msgid "" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." +msgstr "" +"这正是 \"Tensorflow/Keras 模拟 " +"`_\"示例中使用的机制。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr ":py:obj:`run_superlink `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:183 +msgid "Multi-node setups" +msgstr "多节点设置" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -#, fuzzy -msgid "Run Flower SuperNode." -msgstr "Flower 服务器。" +#: ../../source/how-to-run-simulations.rst:185 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." +msgstr "" +"VCE 目前不提供控制特定 \"虚拟 " +"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " +"FL " +"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" +" nfs 或数据库)来避免数据重复。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:187 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." msgstr "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " +"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" +" \"状态\"。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." 
-msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" +msgstr "保存和加载模型检查点" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." +msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" +msgstr "模型检查点" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +msgid "" +"Model updates can be persisted on the server-side by customizing " +":code:`Strategy` methods. Implementing custom strategies is always an " +"option, but for many cases it may be more convenient to simply customize " +"an existing strategy. The following code example defines a new " +":code:`SaveModelStrategy` which customized the existing built-in " +":code:`FedAvg` strategy. In particular, it customizes " +":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " +"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" +" before it returns those aggregated weights to the caller (i.e., the " +"server):" msgstr "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"模型更新可通过自定义 :code:`Strategy` " +"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " +":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " +"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " +":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." -msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +msgid "Save and load PyTorch checkpoints" +msgstr "保存和加载 PyTorch 检查点" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -#, fuzzy -msgid "Classes" -msgstr "类别" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." +msgstr "" +"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " +"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " +"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. 
Note that this will iterate over all saved checkpoints and load the" +" latest one:" +msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 #, fuzzy -msgid ":py:obj:`Client `\\ \\(\\)" -msgstr ":py:obj:`Client `\\ \\(\\)" +msgid "" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." +msgstr "" +"在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " +"``initial_parameters` 中。" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." -msgstr "Flower 客户端的抽象基类。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "升级至 Flower 1.0" -#: ../../source/ref-api/flwr.client.rst:34::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " +"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -#, fuzzy -msgid "Flower ClientApp." -msgstr "Flower 客户端。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 +#: ../../source/how-to-upgrade-to-flower-next.rst:43 +msgid "Install update" +msgstr "安装更新" -#: ../../source/ref-api/flwr.client.rst:34::1 -#, fuzzy -msgid ":py:obj:`NumPyClient `\\ \\(\\)" -msgstr ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" +msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." -msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "pip: add ``-U`` when installing." +msgstr "pip: 安装时添加 ``-U``." -#: ../../source/ref-api/flwr.client.rst:52::1 -#, fuzzy -msgid ":py:obj:`flwr.client.mod `\\" -msgstr ":py:obj:`flwr.client `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" +msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of -#, fuzzy -msgid "Flower Built-in Mods." 
-msgstr "使用内置调制器" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +msgid "" +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" +msgstr "``python -m pip install -U 'flwr[simulation]'``(当使用`start_simulation``时)" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -#, fuzzy -msgid "Bases: :py:class:`~abc.ABC`" -msgstr "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +msgid "" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." +msgstr "" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" +" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -#, fuzzy -msgid "Methods" -msgstr "方法" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`evaluate `\\ \\(ins\\)" -msgstr ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +msgid "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" +msgstr "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " +"}``(当使用``start_simulation``时)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "使用本地数据集评估所提供的参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +#: ../../source/how-to-upgrade-to-flower-next.rst:100 +msgid "Required changes" +msgstr "所需变更" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`fit `\\ \\(ins\\)" -msgstr ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +msgid "The following breaking changes require manual updates." +msgstr "以下更改需要手动更新。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." -msgstr "利用本地数据集完善所提供的参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +msgid "General" +msgstr "一般情况" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). 
" +"Here's an example:" +msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -#, fuzzy -msgid "Get the run context from this client." -msgstr "评估客户端的反应。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" +msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" -msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +msgid "" +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" +msgstr "" +"Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." -msgstr "返回当前本地模型参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "客户端" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_properties `\\ \\(ins\\)" -msgstr ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" +msgstr "" +"NumPyClient的子类:将``def get_parameters(self):```改为``def " +"get_parameters(self,config):``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." -msgstr "返回客户端的属性集。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" +msgstr "" +"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " +"GetParametersIns):\"" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`set_context `\\ \\(context\\)" -msgstr ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "策略 / ``start_server`` / ``start_simulation``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -#, fuzzy -msgid "Apply a run context to this client." -msgstr "将运行上下文应用于该客户端。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. 
Here's an example:" +msgstr "" +"向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " +"dictionary)。下面是一个例子:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" +msgstr "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." -msgstr "返回客户端(本身)。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -#, fuzzy -msgid "Attributes" -msgstr "属性" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +msgid "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" +msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" -#: flwr.client.client.Client.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." 
+msgstr "" +"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " +"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" -msgstr "参数" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "重命名参数/数组转换函数:" -#: flwr.client.client.Client.evaluate:3 of -msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." 
-msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" -msgstr "返回" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." 
-msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" - -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" -msgstr "返回类型" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +msgstr "" +"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " +"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " +"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " +"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" -#: flwr.client.client.Client.fit:3 of -msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." -msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "重命名内置策略参数(例如,`FedAvg``):" -#: flwr.client.client.Client.fit:8 of -msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." -msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: flwr.client.client.Client.get_parameters:3 of -msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." -msgstr "从服务器接收的获取参数指令包含配置值字典。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." 
-msgstr "当前的本地模型参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." -msgstr "从服务器接收的获取属性指令包含配置值字典。" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +msgstr "" +"将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " +"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" +" 和 ``evaluate_fn``。" -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." -msgstr "当前客户端属性。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -#, fuzzy -msgid "ClientApp" -msgstr "客户端" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +msgid "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -#, fuzzy -msgid "Bases: :py:class:`object`" -msgstr "Bases: :py:class:`object`" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" +msgstr "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" -msgstr "实例" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "Custom strategies" +msgstr "定制策略" -#: flwr.client.client_app.ClientApp:5 of -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" -msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 `ClientApp` 中,如下所示:" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" +msgstr "" +"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," +" FitRes], " +"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " +"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" -#: flwr.client.client_app.ClientApp:16 of -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" -msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python 模块中,则可以按如下方式启动它:" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" -#: flwr.client.client_app.ClientApp:21 of -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client.py`,而 `app` " -"指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr ":py:obj:`evaluate `\\ \\(\\)" - -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid "Return a decorator that registers the evaluate fn with the client app." -msgstr "返回一个装饰器,用于向客户端程序注册评估 fn。" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`query `\\ \\(\\)" -msgstr ":py:obj:`query `\\ \\(\\)" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:```" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -#, fuzzy -msgid "Return a decorator that registers the query fn with the client app." 
-msgstr "返回一个向客户端应用程序注册查询 fn 的装饰器。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`train `\\ \\(\\)" -msgstr "server.strategy.Strategy" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "Optional improvements" +msgstr "可选的改进措施" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -#, fuzzy -msgid "Return a decorator that registers the train fn with the client app." -msgstr "返回一个装饰器,用于在客户端应用程序中注册火车 fn。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" +msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." +msgstr "" +"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " +"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 +#: ../../source/how-to-upgrade-to-flower-next.rst:317 +msgid "Further help" +msgstr "更多帮助" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." -msgstr "使用本地数据集训练所提供的参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +msgid "" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." 
+msgstr "" +"大多数官方的 `Flower 代码示例 `_" +" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " +"`_ 并使用 \"#questions``\"。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:2 #, fuzzy -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +msgid "Upgrade to Flower Next" +msgstr "升级至 Flower 1.0" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:4 #, fuzzy msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"欢迎阅读从 Flower 升级到 Flower Next 的迁移指南!无论您是经验丰富的用户还是刚刚开始使用 " +"Flower,本指南都将帮助您顺利过渡现有设置,以利用 Flower Next 从 1.8 版开始的最新功能和改进。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:9 #, fuzzy msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"本指南展示了如何通过使用 Flower Next 中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " +"代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行 Flower Next。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "返回客户端的属性集。" +#: ../../source/how-to-upgrade-to-flower-next.rst:13 +#, fuzzy +msgid "Let's dive in!" +msgstr "让我们深入了解一下!" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:48 #, fuzzy msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" -msgstr "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:54 #, fuzzy -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." 
-msgstr "将对象转换为客户类型并返回。" +msgid "or if you need Flower Next with simulation:" +msgstr "启动 Flower 模拟" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:61 #, fuzzy -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" +msgstr "确保在 ``requirements.txt`` 中设置了以下版本限制" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." -msgstr "当前(全局)模型参数。" +#: ../../source/how-to-upgrade-to-flower-next.rst:71 +#, fuzzy +msgid "or ``pyproject.toml``:" +msgstr "或 ``pyproject.toml```:" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of -msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." -msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" +#: ../../source/how-to-upgrade-to-flower-next.rst:82 +#, fuzzy +msgid "Using Poetry" +msgstr "使用 pip" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#, fuzzy msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " -"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" +" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#, fuzzy msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." -msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" - -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." -msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#, fuzzy msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." 
+"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " -"str 类型值的字典。它可用于将任意值传回服务器。" +"在 Flower Next " +"中,*基础架构层*和*应用层*已经解耦。你不再需要在代码中通过``start_client()``启动客户端,而是创建一个|clientapp_link|_,然后通过命令行启动它。无需通过``start_server()``在代码中启动服务器,而是创建一个" +" |serverapp_link|_ " +"并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和" +" Flower Next 方式运行项目:" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of -msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." -msgstr "" -"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," -" Scalar])已被弃用和移除。" +#: ../../source/how-to-upgrade-to-flower-next.rst:109 +#, fuzzy +msgid "|clientapp_link|_" +msgstr "客户端" -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#, fuzzy msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." -msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" - -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." -msgstr "" -"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " -"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" - -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "**parameters** (*NDArrays*) -- 本地更新的模型参数。" - -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." -msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" - -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of -msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" - -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. 
Here's an example:" +msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of -msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#, fuzzy +msgid "|serverapp_link|_" +msgstr "服务器" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#, fuzzy msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." -msgstr "" -"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " -"类型值的字典。它可用于将任意属性值传回服务器。" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" +msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:" -#: ../../source/ref-api/flwr.client.mod.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:154 #, fuzzy -msgid "mod" -msgstr "模块" +msgid "Deployment" +msgstr "调配" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#, fuzzy msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" +"在依次运行 |flowernext_clientapp_link|_ (2x) 和 |flowernext_serverapp_link|_ " +"之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 " +"`server.py` 作为 Python 脚本执行。" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:158 #, fuzzy -msgid "Client-side adaptive clipping modifier." -msgstr "客户端逻辑" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`fixedclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" -msgstr "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:174 #, fuzzy -msgid "Client-side fixed clipping modifier." -msgstr "客户端逻辑" +msgid "" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." +msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:201 #, fuzzy -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" -msgstr ":py:obj:`Client `\\ \\(\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." 
-msgstr "" +msgid "Simulation in CLI" +msgstr "运行模拟" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:202 #, fuzzy msgid "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" -msgstr ":py:obj:`set_context `\\ \\(context\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of -msgid "Handle incoming message and return results, following the SecAgg protocol." +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" +"分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 " +"|startsim_link|_。下面是一个示例:" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:232 #, fuzzy msgid "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"在 CLI 中运行 |flower_simulation_link|_ 并指向代码中的 ``server_app`` " +"/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 " +"`client_app`` 对象位于 `sim.py`` 模块中):" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#, fuzzy msgid "" -"Handle incoming message and return results, following the SecAgg+ " -"protocol." +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" +"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " +"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:275 #, fuzzy -msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\," -" ctxt\\, call\\_next\\)" -msgstr "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +msgid "Simulation in a Notebook" +msgstr "笔记本中的模拟" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:276 #, fuzzy -msgid "Message size mod." -msgstr "信息类型。" +msgid "" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" +msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:319 #, fuzzy msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." 
msgstr "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"大多数官方的 `Flower 代码示例 `_" +" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " +"`_ 并使用 \"#questions``\"。" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:325 #, fuzzy -msgid "Parameters size mod." -msgstr "参数" +msgid "Important" +msgstr "重要变更:" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:328 #, fuzzy msgid "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," -" sensitivity\\, ...\\)" -msgstr "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" +msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:334 #, fuzzy -msgid "Modifier for local differential privacy." -msgstr "差分隐私" +msgid "Happy migrating! 🚀" +msgstr "移民愉快!🚀" -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:2 #, fuzzy -msgid "LocalDpMod" -msgstr "本地 DP 模式" +msgid "Use Built-in Mods" +msgstr "使用内置调制器" -#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +#: ../../source/how-to-use-built-in-mods.rst:4 +#, fuzzy msgid "" -"This mod clips the client model updates and adds noise to the params " -"before sending them to the server." -msgstr "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 -#: flwr.client.mod.localdp_mod.LocalDpMod:6 of -msgid "It operates on messages of type `MessageType.TRAIN`." +#: ../../source/how-to-use-built-in-mods.rst:6 +#, fuzzy +msgid "" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" +"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp``" +" 处理任务之前和之后执行操作。" -#: flwr.client.mod.localdp_mod.LocalDpMod:8 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of +#: ../../source/how-to-use-built-in-mods.rst:9 #, fuzzy -msgid "The value of the clipping norm." -msgstr "削波法线的值。" +msgid "What are Mods?" +msgstr "什么是 Mods?" -#: flwr.client.mod.localdp_mod.LocalDpMod:10 of -msgid "The sensitivity of the client model." +#: ../../source/how-to-use-built-in-mods.rst:11 +#, fuzzy +msgid "" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" msgstr "" +"Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 ``Message`` 和由此产生的传出的 " +"``Message`` 。一个 ``Mod`` 的签名如下:" -#: flwr.client.mod.localdp_mod.LocalDpMod:12 of -msgid "" -"The privacy budget. Smaller value of epsilon indicates a higher level of " -"privacy protection." 
-msgstr "" - -#: flwr.client.mod.localdp_mod.LocalDpMod:15 of -msgid "" -"The failure probability. The probability that the privacy mechanism fails" -" to provide the desired level of privacy. A smaller value of delta " -"indicates a stricter privacy guarantee." -msgstr "" +#: ../../source/how-to-use-built-in-mods.rst:18 +#, fuzzy +msgid "A typical mod function might look something like this:" +msgstr "一个典型的修改函数可能是这样的:" -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of -msgid "Create an instance of the local DP mod and add it to the client-side mods:" -msgstr "" +#: ../../source/how-to-use-built-in-mods.rst:31 +#, fuzzy +msgid "Using Mods" +msgstr "使用修改器" -#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" -msgstr "" +#: ../../source/how-to-use-built-in-mods.rst:33 +#, fuzzy +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +#: ../../source/how-to-use-built-in-mods.rst:36 #, fuzzy -msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " -"wrapper." -msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" +msgid "1. Import the required mods" +msgstr "1. 导入所需修改" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +#: ../../source/how-to-use-built-in-mods.rst:38 #, fuzzy -msgid "The wrapper sends the clipping_norm value to the client." -msgstr "向客户发送近端因子mu" +msgid "First, import the built-in mod you intend to use:" +msgstr "首先,导入您打算使用的内置模式:" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of -msgid "This mod clips the client model updates before sending them to the server." -msgstr "" +#: ../../source/how-to-use-built-in-mods.rst:46 +#, fuzzy +msgid "2. Define your client function" +msgstr "2. 定义客户功能" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +#: ../../source/how-to-use-built-in-mods.rst:48 +#, fuzzy msgid "" -"It also sends KEY_NORM_BIT to the server for computing the new clipping " -"value." -msgstr "" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "定义将被 mod 封装的客户端函数(``client_fn``):" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of +#: ../../source/how-to-use-built-in-mods.rst:57 #, fuzzy -msgid "Notes" -msgstr "无" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of -msgid "Consider the order of mods when using multiple." -msgstr "" +msgid "3. Create the ``ClientApp`` with mods" +msgstr "3. 用模块创建 ``ClientApp``" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of -msgid "Typically, adaptiveclipping_mod should be the last to operate on params." -msgstr "" +#: ../../source/how-to-use-built-in-mods.rst:59 +#, fuzzy +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. 
The order in which you provide the mods matters:" +msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺序很重要:" -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:72 #, fuzzy -msgid "fixedclipping\\_mod" -msgstr "剪贴" +msgid "Order of execution" +msgstr "停用" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +#: ../../source/how-to-use-built-in-mods.rst:74 #, fuzzy msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." -msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" - -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" -msgstr "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." -msgstr "" +#: ../../source/how-to-use-built-in-mods.rst:76 +#, fuzzy +msgid "``example_mod_1`` (outermost mod)" +msgstr "``example_mod_1`` (最外层模块)" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:77 #, fuzzy -msgid "parameters\\_size\\_mod" -msgstr "参数" +msgid "``example_mod_2`` (next mod)" +msgstr "示例模式 2(下一个模式)" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +#: ../../source/how-to-use-built-in-mods.rst:78 +#, fuzzy msgid "" -"This mod logs the number of parameters transmitted in the message as well" -" as their size in bytes." -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" -msgstr "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:79 #, fuzzy -msgid "secaggplus\\_mod" -msgstr "工作流程" +msgid "``example_mod_2`` (on the way back)" +msgstr "``example_mod_2`` (返回途中)" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:80 #, fuzzy -msgid "run\\_client\\_app" -msgstr "run\\_client\\_app" +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr "``example_mod_1`` (返回途中最外层的模式)" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:82 #, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样,也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" -#: ../../source/ref-api/flwr.client.start_client.rst:2 +#: ../../source/how-to-use-built-in-mods.rst:87 #, fuzzy -msgid "start\\_client" -msgstr "启动客户端" - -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." 
+"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -"服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " -"8080,则`server_address`应为`\"[::]:8080\"`。" +"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记住,mod " +"的顺序至关重要,它会影响输入和输出的处理方式。" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" -msgstr "用于实例化客户端的可调用程序。(默认值:无)" +#: ../../source/how-to-use-built-in-mods.rst:89 +#, fuzzy +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" -#: flwr.client.app.start_client:9 of -msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" -msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" +#: ../../source/how-to-use-differential-privacy.rst:2 +#, fuzzy +msgid "Use Differential Privacy" +msgstr "差分隐私" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/how-to-use-differential-privacy.rst:3 +#, fuzzy msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" +"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参考 :doc:`explanation-" +"differential-privacy` 。" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-use-differential-privacy.rst:7 +#, fuzzy msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." -msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." +msgstr "" +"Flower " +"中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能,请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: ../../source/how-to-use-differential-privacy.rst:12 #, fuzzy msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." -msgstr "" -"为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 `root_certificates` 为 " -"None,则使用系统证书。" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." 
+msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: ../../source/how-to-use-differential-privacy.rst:15 +#, fuzzy msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " -"'rest': HTTP(实验性)" +"** 服务器端剪切**: " +"这种方法的优点是服务器可对所有客户端的更新执行统一的剪切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行剪切操作,从而增加了服务器的计算负荷。" -#: flwr.client.app.start_client:31 of +#: ../../source/how-to-use-differential-privacy.rst:16 #, fuzzy msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." -msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限制尝试次数。" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." +msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集中控制的缺点,因为服务器对剪切过程的控制较少。" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-use-differential-privacy.rst:21 #, fuzzy -msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." -msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无\",则总时间没有限制。" - -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" -msgstr "使用不安全的服务器连接启动 gRPC 客户端:" - -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -#, fuzzy -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "启动支持 SSL 的 gRPC 客户端:" +msgid "Server-side Clipping" +msgstr "服务器端逻辑" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +#: ../../source/how-to-use-differential-privacy.rst:22 #, fuzzy -msgid "Starting an SSL-enabled gRPC client using provided certificates:" -msgstr "启动支持 SSL 的 gRPC 客户端:" +msgid "" +"For central DP with server-side clipping, there are two :code:`Strategy` " +"classes that act as wrappers around the actual :code:`Strategy` instance " +"(for example, :code:`FedAvg`). The two wrapper classes are " +":code:`DifferentialPrivacyServerSideFixedClipping` and " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " +"adaptive clipping." 
+msgstr "" +"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:`Strategy` 实例(例如 " +":code:`FedAvg`)的包装器。这两个封装类分别是 " +":code:`DifferentialPrivacyServerSideFixedClipping` 和 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid "start\\_numpy\\_client" -msgstr "start_numpy_client" +msgid "server side clipping" +msgstr "服务器端逻辑" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-use-differential-privacy.rst:31 #, fuzzy msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +"The code sample below enables the :code:`FedAvg` strategy to use server-" +"side fixed clipping using the " +":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " +"same approach can be used with " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"corresponding input parameters." msgstr "" -"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`,并首先通过执行 " -":code:`to_client()`方法将 :code:`NumPyClient`转换为 :code:`flwr.client.Client`。" - -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." -msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" - -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "常见" +"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类使 " +":code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方法也可用于 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`。" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:52 #, fuzzy -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgid "Client-side Clipping" +msgstr "客户端逻辑" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#: ../../source/how-to-use-differential-privacy.rst:53 #, fuzzy -msgid "Create Array from NumPy ndarray." -msgstr "将参数对象转换为 NumPy ndarrays。" +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +":code:`Mods` to perform the clipping. Two mods are available for fixed " +"and adaptive client-side clipping: :code:`fixedclipping_mod` and " +":code:`adaptiveclipping_mod` with corresponding server-side wrappers " +":code:`DifferentialPrivacyClientSideFixedClipping` and " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +msgstr "" +"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客户端可以使用现有的 Flower " +":code:`Mods`来执行剪裁。有两种模式可用于固定和自适应客户端剪辑::code:`fixedclipping_mod` 和 " +":code:`adaptiveclipping_mod`,以及相应的服务器端封装 " +":code:`DifferentialPrivacyClientSideFixedClipping` 和 " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." 
-msgstr "从字节反序列化 NumPy ndarray。" +msgid "client side clipping" +msgstr "客户端逻辑" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:63 #, fuzzy msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"The code sample below enables the :code:`FedAvg` strategy to use " +"differential privacy with client-side fixed clipping using both the " +":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " +"the client, :code:`fixedclipping_mod`:" msgstr "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." -msgstr "配置将日志记录到文件和/或远程日志服务器。" +"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类和客户端的 " +":code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的情况下使用差分隐私:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:80 #, fuzzy msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"In addition to the server-side strategy wrapper, the :code:`ClientApp` " +"needs to configure the matching :code:`fixedclipping_mod` to perform the " +"client-side clipping:" msgstr "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -#, fuzzy -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." -msgstr "将 create_event 提交给 ThreadPoolExecutor 以避免阻塞。" +"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " +"以执行客户端剪切:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:97 #, fuzzy msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." -msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" +"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加噪声,可以使用 " +"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" -msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "将 NumPy ndarray 序列化为字节。" +msgid "local DP mod" +msgstr "本地 DP 模式" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:104 #, fuzzy -msgid ":py:obj:`now `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." 
-msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" +msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:122 #, fuzzy msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" -msgstr "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "将 NumPy ndarrays 转换为参数对象。" +#: ../../source/how-to-use-differential-privacy.rst:125 +#, fuzzy +msgid "Local Training using Privacy Engines" +msgstr "使用隐私引擎进行本地培训" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-differential-privacy.rst:126 #, fuzzy msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 TensorFlow Privacy 等隐私引擎。有关将 Flower" +" 与这些引擎结合使用的示例,请参阅 Flower 示例目录(`Opacus " +"`_, `Tensorflow" +" Privacy `_)。" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." -msgstr "将参数对象转换为 NumPy ndarrays。" +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "使用策略" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/how-to-use-strategies.rst:4 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" -msgstr "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -#, fuzzy -msgid "Array type." -msgstr "返回类型" +"Flower allows full customization of the learning process through the " +":code:`Strategy` abstraction. A number of built-in strategies are " +"provided in the core framework." +msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/how-to-use-strategies.rst:6 msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" -msgstr "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." 
-msgstr "ClientMessage 是用于容纳一条结果信息的容器。" +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" +msgstr "使用现有策略,例如 :code:`FedAvg`" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`Code `\\ \\(value\\)" -msgstr ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" +msgstr "使用回调函数定制现有策略" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "客户端状态代码。" +#: ../../source/how-to-use-strategies.rst:10 +#: ../../source/how-to-use-strategies.rst:87 +msgid "Implement a novel strategy" +msgstr "实施新策略" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +#: ../../source/how-to-use-strategies.rst:14 +msgid "Use an existing strategy" +msgstr "使用现有策略" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -#, fuzzy -msgid "Configs record." -msgstr "配置日志记录" +#: ../../source/how-to-use-strategies.rst:16 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/how-to-use-strategies.rst:25 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the :code:`start_server` function. It is usually " +"recommended to adjust a few parameters during instantiation:" +msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -#, fuzzy -msgid "State of your run." -msgstr "您的运行状态。" +#: ../../source/how-to-use-strategies.rst:42 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." +msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" -msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/how-to-use-strategies.rst:45 +msgid "Configuring client fit and client evaluate" +msgstr "配置客户匹配和客户评估" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "客户端向服务器发送 DisconnectRes 信息。" +#: ../../source/how-to-use-strategies.rst:47 +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to :code:`on_fit_config_fn`. The provided function " +"will be called by the strategy and must return a dictionary of " +"configuration key values pairs that will be sent to the client. It must " +"return a dictionary of arbitrary configuration values :code:`client.fit`" +" and :code:`client.evaluate` functions during each round of federated " +"learning." 
+msgstr "" +"服务器可以通过向 :code:`on_fit_config_fn` " +"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" +" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:75 #, fuzzy msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the :code:`on_fit_config_fn` in its own " +":code:`client.fit()` function." msgstr "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " +":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." -msgstr "评估客户端的指示。" +#: ../../source/how-to-use-strategies.rst:78 +msgid "" +"Similar to :code:`on_fit_config_fn`, there is also " +":code:`on_evaluate_config_fn` to customize the configuration sent to " +":code:`client.evaluate()`" +msgstr "" +"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " +":code:`client.evaluate()` 的配置" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/how-to-use-strategies.rst:81 +msgid "Configuring server-side evaluation" +msgstr "配置服务器端评估" + +#: ../../source/how-to-use-strategies.rst:83 msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"Server-side evaluation can be enabled by passing an evaluation function " +"to :code:`evaluate_fn`." +msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" + +#: ../../source/how-to-use-strategies.rst:89 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " +"指南,了解更多信息。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." -msgstr "评估客户端的反应。" +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "教程" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`EventType `\\ \\(value\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "快速入门教程" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." -msgstr "遥测事件类型。" +#: ../../source/index.rst:75 ../../source/index.rst:79 +msgid "How-to guides" +msgstr "操作指南" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: ../../source/index.rst:100 +msgid "Legacy example guides" +msgstr "旧版指南范例" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." 
-msgstr "为客户提供安装说明。" +#: ../../source/index.rst:108 ../../source/index.rst:112 +msgid "Explanations" +msgstr "说明" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" -msgstr "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +#: None:-1 +msgid "API reference" +msgstr "应用程序接口参考" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "来自客户端的合适回复。" +#: ../../source/index.rst:138 +msgid "Reference docs" +msgstr "参考文档" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: ../../source/index.rst:154 +msgid "Contributor tutorials" +msgstr "贡献者教程" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -#, fuzzy -msgid "A dataclass that stores information about an error that occurred." -msgstr "数据类,用于存储所发生错误的相关信息。" +#: ../../source/index.rst:161 +msgid "Contributor how-to guides" +msgstr "投稿指南" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" -msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" +#: ../../source/index.rst:173 +msgid "Contributor explanations" +msgstr "贡献者解释" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." -msgstr "客户端的参数请求。" +#: ../../source/index.rst:179 +msgid "Contributor references" +msgstr "贡献者参考资料" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/index.rst:-1 msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" -msgstr "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "查看主 Flower Framework 的文档,轻松实现联邦学习的 Python 开发。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "要求返回参数时的响应。" +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" +msgstr "Flower 框架文档" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" -msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." +msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "客户端的属性请求。" +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "加入 Flower 社区" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/index.rst:13 msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" -msgstr "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." +msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." 
-msgstr "来自客户端的属性响应。" +#: ../../source/index.rst:15 +msgid "Join us on Slack" +msgstr "在 Slack 上加入我们" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "Flower 框架" + +#: ../../source/index.rst:25 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." msgstr "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -#, fuzzy -msgid "State of your application from the viewpoint of the entity using it." -msgstr "从使用实体的角度看应用程序的状态。" +"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " +"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`MessageType `\\ \\(\\)" -msgstr ":py:obj:`MessageType `\\ \\(\\)" +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "教程" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -#, fuzzy -msgid "Message type." -msgstr "信息类型。" +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." +msgstr "以学习为导向的联邦学习教程系列,最好的起点。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:62 #, fuzzy -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +msgstr "" +"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " +"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " +":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" +":`scikit-learn ` | :doc:`XGBoost " +"` | :doc:`Android ` | :doc:`iOS `" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -#, fuzzy -msgid "Legacy message type." -msgstr "传统信息类型。" +#: ../../source/index.rst:64 +msgid "We also made video tutorials for PyTorch:" +msgstr "我们还为 PyTorch 制作了视频教程:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/index.rst:69 +msgid "And TensorFlow:" +msgstr "还有 TensorFlow:" + +#: ../../source/index.rst:77 msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" -msgstr "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." +msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of +#: ../../source/index.rst:110 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." +msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" + +#: ../../source/index.rst:121 +msgid "References" +msgstr "参考资料" + +#: ../../source/index.rst:123 +msgid "Information-oriented API reference and other reference material." 
+msgstr "以信息为导向的 API 参考资料和其他参考资料。" + +#: ../../source/index.rst:132::1 #, fuzzy -msgid "A dataclass holding metadata associated with the current message." -msgstr "数据类型,包含与当前报文相关的元数据。" +msgid ":py:obj:`flwr `\\" +msgstr ":py:obj:`flwr `\\" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:132::1 flwr:1 of #, fuzzy +msgid "Flower main package." +msgstr "Flower 主包装。" + +#: ../../source/index.rst:149 +msgid "Contributor docs" +msgstr "贡献者文档" + +#: ../../source/index.rst:151 msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -msgstr "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." +msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "Flower CLI 参考" + +#: ../../source/ref-api-cli.rst:7 #, fuzzy -msgid "Metrics record." -msgstr "指标记录。" +msgid "flwr CLI" +msgstr "Flower 客户端" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../flwr:1 #, fuzzy -msgid ":py:obj:`NDArray `\\" -msgstr ":py:obj:`NDArray `\\" +msgid "flwr is the Flower command line interface." +msgstr "注册 Flower ClientProxy 实例。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/ref-api-cli.rst #, fuzzy -msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -msgstr "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgid "Options" +msgstr "解决方案" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../flwr:1 #, fuzzy +msgid "Install completion for the current shell." +msgstr "当前运行的标识符。" + +#: ../../flwr:1 msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." -msgstr "模型参数。" +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../flwr build:1 msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" msgstr "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -#, fuzzy -msgid "Parameters record." -msgstr "参数" +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." +msgstr "" + +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../flwr install:1 #, fuzzy -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgid "Install a Flower App Bundle." 
+msgstr "安装Flower" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "服务器发送给客户端的重新连接信息。" +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" msgstr "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -#, fuzzy -msgid "RecordSet stores groups of parameters, metrics and configs." -msgstr "RecordSet 可存储参数、指标和配置组。" +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" +msgstr "" + +#: ../../flwr install:1 msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" msgstr "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." -msgstr "ServerMessage 是用于容纳一条指令信息的容器。" +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "客户端状态。" +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" +msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:2 -#, fuzzy -msgid "Array" -msgstr "数组" +#: ../../flwr install:1 +msgid "The desired install path." +msgstr "" -#: flwr.common.record.parametersrecord.Array:3 of +#: ../../source/ref-api-cli.rst #, fuzzy -msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." -msgstr "数据类,包含数组类或张量类对象的序列化数据以及相关元数据。" +msgid "Arguments" +msgstr "参数解析器" -#: flwr.common.record.parametersrecord.Array:6 of +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy -msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" -msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" +msgid "Optional argument" +msgstr "可选的改进措施" -#: flwr.common.record.parametersrecord.Array:8 of -#, fuzzy -msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." -msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或仅作为元数据字段使用。" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" -#: flwr.common.record.parametersrecord.Array:12 of -#, fuzzy -msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." 
-msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` 中的字节。" +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" -#: flwr.common.record.parametersrecord.Array:15 of +#: ../../flwr log #, fuzzy -msgid "A buffer of bytes containing the data." -msgstr "包含数据的字节缓冲区。" +msgid "default" +msgstr "工作流程" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#, fuzzy -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr "server.strategy.Strategy" +#: ../../flwr log:1 +msgid "``True``" +msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of +#: ../../flwr log:1 #, fuzzy -msgid "Return the array as a NumPy array." -msgstr "以 NumPy ndarrays 列表形式返回模型参数" +msgid "Required argument" +msgstr "构建文档" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../flwr new:1 #, fuzzy -msgid ":py:obj:`dtype `\\" -msgstr ":py:obj:`dtype `\\" +msgid "Create new Flower App." +msgstr "Flower 服务器。" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -#, fuzzy -msgid ":py:obj:`shape `\\" -msgstr "server.strategy.Strategy" +#: ../../flwr new:1 +msgid "The ML framework to use" +msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../flwr new #, fuzzy -msgid ":py:obj:`stype `\\" -msgstr "server.strategy.Strategy" +msgid "options" +msgstr "解决方案" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -#, fuzzy -msgid ":py:obj:`data `\\" -msgstr ":py:obj:`data `\\" +#: ../../flwr new:1 +msgid "" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" +msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -#, fuzzy -msgid "ClientMessage" -msgstr "客户端" +#: ../../flwr new:1 +msgid "The Flower username of the author" +msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../flwr run:1 #, fuzzy -msgid ":py:obj:`evaluate_res `\\" -msgstr ":py:obj:`evaluate_res `\\" +msgid "Run Flower App." +msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -#, fuzzy -msgid ":py:obj:`fit_res `\\" -msgstr ":py:obj:`fit_res `\\" +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" +msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -#, fuzzy +#: ../../flwr run:1 msgid "" -":py:obj:`get_parameters_res " -"`\\" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -":py:obj:`get_parameters_res " -"`\\" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -#, fuzzy +#: ../../flwr run:1 msgid "" -":py:obj:`get_properties_res " -"`\\" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." 
msgstr "" -":py:obj:`get_properties_res " -"`\\" -#: ../../source/ref-api/flwr.common.Code.rst:2 +#: ../../source/ref-api-cli.rst:16 #, fuzzy -msgid "Code" -msgstr "代码" +msgid "flower-simulation" +msgstr "运行模拟" -#: flwr.common.typing.Code:1 of -#, fuzzy -msgid "Bases: :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`~enum.Enum`" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/ref-api-cli.rst:36 #, fuzzy -msgid ":py:obj:`OK `\\" -msgstr ":py:obj:`OK `\\" +msgid "flower-supernode" +msgstr "Flower 服务器" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/ref-api-cli.rst:46 #, fuzzy +msgid "flower-server-app" +msgstr "flower-driver-api" + +#: ../../source/ref-api-cli.rst:49 msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " +"longer supports passing a reference to a `ServerApp` attribute. Instead, " +"you need to pass the path to Flower app via the argument :code:`--app`. " +"This is the path to a directory containing a `pyproject.toml`. You can " +"create a valid Flower app by executing :code:`flwr new` and following the" +" prompt." msgstr "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/ref-api-cli.rst:62 #, fuzzy -msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" -msgstr "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +msgid "flower-superexec" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/ref-api/flwr.rst:2 #, fuzzy -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgid "flwr" +msgstr "Flower" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 #, fuzzy -msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" -msgstr "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +msgid "Modules" +msgstr "模块" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid "ConfigsRecord" -msgstr "配置日志记录" +msgid ":py:obj:`flwr.client `\\" +msgstr ":py:obj:`flwr.client `\\" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -#, fuzzy -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" -msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " -":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " -":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " -"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " -":py:class:`~typing.List`\\ [:py:class:`bytes`], " -":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." 
+msgstr "Flower 客户端。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +msgid ":py:obj:`flwr.common `\\" +msgstr ":py:obj:`flwr.common `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid "Remove all items from R." -msgstr "从 R 中删除所有项目。" +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "服务器和客户端共享的通用组件。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid ":py:obj:`flwr.server `\\" +msgstr ":py:obj:`flwr.server `\\" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid "Return number of Bytes stored in this object." -msgstr "返回存储在此对象中的字节数。" +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." +msgstr "Flower 服务器。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgid ":py:obj:`flwr.simulation `\\" +msgstr ":py:obj:`flwr.simulation `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of #, fuzzy -msgid "d defaults to None." -msgstr "d 默认为 \"无\"。" +msgid "Flower simulation." +msgstr "运行模拟" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "客户端" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +msgid "Functions" +msgstr "四种函数:" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.client.rst:23::1 #, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -#, fuzzy -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." -msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." 
+msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.client.rst:23::1 #, fuzzy msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -#, fuzzy -msgid "Update R from dict/iterable E and F." -msgstr "根据二进制/可迭代 E 和 F 更新 R。" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." +msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 #, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +msgid "Classes" +msgstr "类别" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy -msgid "This function counts booleans as occupying 1 Byte." -msgstr "该函数将布尔值计算为占用 1 个字节。" +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.Context.rst:2 -#, fuzzy -msgid "Context" -msgstr "背景" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "Flower 客户端的抽象基类。" -#: flwr.common.context.Context:3 of +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" -msgstr "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存储器,在实体生命周期的不同阶段(如多轮)进行访问。" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of #, fuzzy -msgid ":py:obj:`state `\\" -msgstr "server.strategy.Strategy" +msgid "Flower ClientApp." +msgstr "Flower 客户端。" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy -msgid "DisconnectRes" -msgstr "断开Res" +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr ":py:obj:`NumPyClient `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -#, fuzzy -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." 
+msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" -#: ../../source/ref-api/flwr.common.Error.rst:2 +#: ../../source/ref-api/flwr.client.rst:50::1 #, fuzzy -msgid "Error" -msgstr "错误" +msgid ":py:obj:`flwr.client.mod `\\" +msgstr ":py:obj:`flwr.client `\\" -#: flwr.common.message.Error:3 of +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of #, fuzzy -msgid "An identifier for the error." -msgstr "错误的标识符。" +msgid "Flower Built-in Mods." +msgstr "使用内置调制器" -#: flwr.common.message.Error:5 of +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of #, fuzzy -msgid "A reason for why the error arose (e.g. an exception stack-trace)" -msgstr "出错原因(如异常堆栈跟踪)" +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "Bases: :py:class:`~abc.ABC`" -#: flwr.common.Error.code:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 #, fuzzy -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +msgid "Methods" +msgstr "方法" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "Error code." -msgstr "错误代码。" +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr ":py:obj:`evaluate `\\ \\(ins\\)" -#: flwr.common.Error.code:1::1 of -#, fuzzy -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "使用本地数据集评估所提供的参数。" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "Reason reported about the error." -msgstr "报告的错误原因。" +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr ":py:obj:`fit `\\ \\(ins\\)" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -#, fuzzy -msgid "EvaluateIns" -msgstr "说明" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "利用本地数据集完善所提供的参数。" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid "Get the run context from this client." 
+msgstr "评估客户端的反应。" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "EvaluateRes" -msgstr "评估Res" +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "返回当前本地模型参数。" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`loss `\\" -msgstr ":py:obj:`loss `\\" +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr ":py:obj:`get_properties `\\ \\(ins\\)" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "返回客户端的属性集。" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:2 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of #, fuzzy -msgid "EventType" -msgstr "返回类型" +msgid "Apply a run context to this client." +msgstr "将运行上下文应用于该客户端。" -#: flwr.common.telemetry.EventType:1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." 
+msgstr "返回客户端(本身)。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 #, fuzzy -msgid "Encode the string using the codec registered for encoding." -msgstr "使用注册的编码解码器对字符串进行编码。" +msgid "Attributes" +msgstr "属性" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.Client.context:1::1 of #, fuzzy -msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -#, fuzzy -msgid "Return a copy with all occurrences of substring old replaced by new." -msgstr "返回用 new 替换子串 old 的所有出现次数的副本。" +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr ":py:obj:`PING `\\" +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "参数" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of -#, fuzzy +#: flwr.client.client.Client.evaluate:3 of msgid "" -"Return a list of the substrings in the string, using sep as 
the separator" -" string." -msgstr "使用 sep 作为分隔符,返回字符串中的子字符串列表。" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." +msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" +msgstr "返回" + +#: flwr.client.client.Client.evaluate:8 of msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr ":py:obj:`PING `\\" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." 
+msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" +msgstr "返回类型" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -#, fuzzy -msgid "Concatenate any number of strings." -msgstr "连接任意数量的字符串。" +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -#, fuzzy -msgid "Return a capitalized version of the string." -msgstr "返回字符串的大写版本。" +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "从服务器接收的获取参数指令包含配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." +msgstr "当前的本地模型参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -#, fuzzy -msgid "Return a version of the string suitable for caseless comparisons." -msgstr "返回适合无例比较的字符串版本。" +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." 
+msgstr "从服务器接收的获取属性指令包含配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`title `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "当前客户端属性。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 #, fuzzy -msgid "Return a version of the string where each word is titlecased." -msgstr "返回字符串的版本,其中每个单词都使用了标题大小写。" +msgid "ClientApp" +msgstr "客户端" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of #, fuzzy -msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "Bases: :py:class:`object`" +msgstr "Bases: :py:class:`object`" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -#, fuzzy -msgid "Return a centered string of length width." 
-msgstr "返回客户端的属性集。" +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "实例" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:5 of #, fuzzy msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" +msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 `ClientApp` 中,如下所示:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:16 of #, fuzzy msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." -msgstr "返回子字符串 sub 在字符串 S[start:end] 中非重叠出现的次数。" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python 模块中,则可以按如下方式启动它:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:21 of #, fuzzy msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." +msgstr "" +"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client.py`,而 `app` " +"指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid "Return a copy where all tab characters are expanded using spaces." -msgstr "返回使用空格扩展所有制表符的副本。" +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr ":py:obj:`evaluate `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`PING `\\" +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "返回一个装饰器,用于向客户端程序注册评估 fn。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." 
-msgstr "返回在 S 中找到子串 sub 的最低索引,且 sub 包含在 S[start:end] 中。" +msgid ":py:obj:`query `\\ \\(\\)" +msgstr ":py:obj:`query `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of #, fuzzy -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" -msgstr ":py:obj:`partition_id `\\" +msgid "Return a decorator that registers the query fn with the client app." +msgstr "返回一个向客户端应用程序注册查询 fn 的装饰器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid "Partition the string into three parts using the given separator." -msgstr "使用给定的分隔符将字符串分为三部分。" +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of #, fuzzy -msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid "Return a decorator that registers the train fn with the client app." +msgstr "返回一个装饰器,用于在客户端应用程序中注册火车 fn。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid "Return a left-justified string of length width." -msgstr "返回长度为 width 的左对齐字符串。" +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" +msgstr "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid ":py:obj:`lower `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -#, fuzzy -msgid "Return a copy of the string converted to lowercase." -msgstr "返回转换为小写的字符串副本。" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "使用本地数据集训练所提供的参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid "Return a copy of the string with leading whitespace removed." 
-msgstr "返回去掉前导空白的字符串副本。" +msgid "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`PING `\\" +":py:obj:`get_properties `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_properties `\\ " +"\\(config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." -msgstr "返回在 S 中找到子串 sub 的最高索引,且 sub 包含在 S[start:end] 中。" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "返回客户端的属性集。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +":py:obj:`set_context `\\ " +"\\(context\\)" +msgstr "" +":py:obj:`set_context `\\ " +"\\(context\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -#, fuzzy -msgid "Return a right-justified string of length width." -msgstr "返回长度为 width 的右对齐字符串。" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "将对象转换为客户类型并返回。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.NumPyClient.context:1::1 of #, fuzzy -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -#, fuzzy -msgid "Return a copy of the string with trailing whitespace removed." -msgstr "返回去掉尾部空白的字符串副本。" +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "当前(全局)模型参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -msgstr ":py:obj:`partition_id `\\" +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." 
+msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" -msgstr ":py:obj:`PING `\\" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." +msgstr "" +"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " +"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -#, fuzzy -msgid "Return a list of the lines in the string, breaking at line boundaries." -msgstr "返回字符串中的行列表,以行为分界线。" +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -#, fuzzy -msgid "Return a copy of the string with leading and trailing whitespace removed." -msgstr "返回去掉前导和尾部空白的字符串副本。" +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." +msgstr "" +"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " +"str 类型值的字典。它可用于将任意值传回服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." +msgstr "" +"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," +" Scalar])已被弃用和移除。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.fit:5 of msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." -msgstr "将大写字母转换为小写字母,将小写字母转换为大写字母。" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." 
+msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." +msgstr "" +"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " +"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -#, fuzzy -msgid "Replace each character in the string using the given translation table." -msgstr "使用给定的翻译表替换字符串中的每个字符。" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "**parameters** (*NDArrays*) -- 本地更新的模型参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -#, fuzzy -msgid "Return a copy of the string converted to uppercase." -msgstr "返回转换为大写字符串的副本。" +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "Return True if S starts with the specified prefix, False otherwise." -msgstr "如果 S 以指定前缀开头,则返回 True,否则返回 False。" +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." 
+msgstr "" +"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " +"类型值的字典。它可用于将任意属性值传回服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:2 #, fuzzy +msgid "mod" +msgstr "模块" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of #, fuzzy -msgid "Return True if S ends with the specified suffix, False otherwise." -msgstr "如果 S 以指定后缀结束,则返回 True,否则返回 False。" +msgid "Client-side adaptive clipping modifier." +msgstr "客户端逻辑" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of #, fuzzy -msgid "Return a str with the given prefix string removed if present." -msgstr "返回一个字符串,如果存在,则去掉给定的前缀字符串。" +msgid "Client-side fixed clipping modifier." +msgstr "客户端逻辑" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -#, fuzzy -msgid "Return a str with the given suffix string removed if present." -msgstr "返回一个字符串,如果存在给定的后缀字符串,则将其删除。" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of #, fuzzy -msgid "Return True if all characters in the string are ASCII, False otherwise." -msgstr "如果字符串中的所有字符都是 ASCII 码,则返回 True,否则返回 False。" +msgid "Message size mod." 
+msgstr "信息类型。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of #, fuzzy -msgid "Return True if the string is a lowercase string, False otherwise." -msgstr "如果字符串是小写字符串,则返回 True,否则返回 False。" +msgid "Parameters size mod." +msgstr "参数" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -#, fuzzy -msgid "Return True if the string is an uppercase string, False otherwise." -msgstr "如果字符串是大写字符串,则返回 True,否则返回 False。" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -#, fuzzy -msgid "Return True if the string is a title-cased string, False otherwise." -msgstr "如果字符串是带标题的字符串,则返回 True,否则返回 False。" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +msgid "" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:35::1 #, fuzzy -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of #, fuzzy -msgid "Return True if the string is a whitespace string, False otherwise." -msgstr "如果字符串是空白字符串,则返回 True,否则返回 False。" +msgid "Modifier for local differential privacy." +msgstr "差分隐私" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 #, fuzzy -msgid ":py:obj:`isdecimal `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid "LocalDpMod" +msgstr "本地 DP 模式" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -#, fuzzy -msgid "Return True if the string is a decimal string, False otherwise." 
-msgstr "如果字符串是十进制字符串,则返回 True,否则返回 False。" +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +msgid "" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isdigit `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of #, fuzzy -msgid "Return True if the string is a digit string, False otherwise." -msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" +msgid "The value of the clipping norm." +msgstr "削波法线的值。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isnumeric `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -#, fuzzy -msgid "Return True if the string is a numeric string, False otherwise." -msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isalpha `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -#, fuzzy -msgid "Return True if the string is an alphabetic string, False otherwise." -msgstr "如果字符串是字母字符串,则返回 True,否则返回 False。" +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of #, fuzzy -msgid "Return True if the string is an alpha-numeric string, False otherwise." -msgstr "如果字符串是字母数字字符串,则返回 True,否则返回 False。" +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." 
+msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of #, fuzzy -msgid ":py:obj:`isidentifier `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "The wrapper sends the clipping_norm value to the client." +msgstr "向客户发送近端因子mu" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -#, fuzzy -msgid "Return True if the string is a valid Python identifier, False otherwise." -msgstr "如果字符串是有效的 Python 标识符,则返回 True,否则返回 False。" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +msgid "" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of #, fuzzy -msgid "Return True if the string is printable, False otherwise." -msgstr "如果字符串可打印,则返回 True,否则返回 False。" +msgid "Notes" +msgstr "无" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +msgstr "" + +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 #, fuzzy -msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." -msgstr "在数字字符串左侧填充零,以填满给定宽度的字段。" +msgid "fixedclipping\\_mod" +msgstr "剪贴" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of #, fuzzy msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "Return a formatted version of S, using substitutions from args and kwargs." 
-msgstr "使用 args 和 kwargs 的替换,返回 S 的格式化版本。" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "Return a formatted version of S, using substitutions from mapping." -msgstr "使用映射中的替换,返回 S 的格式化版本。" +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`maketrans `\\" -msgstr ":py:obj:`TRAIN `\\" +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -#, fuzzy -msgid "Return a translation table usable for str.translate()." -msgstr "返回可用于 str.translate() 的翻译表。" +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 #, fuzzy -msgid ":py:obj:`PING `\\" -msgstr ":py:obj:`PING `\\" +msgid "parameters\\_size\\_mod" +msgstr "参数" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`START_CLIENT_ENTER `\\" -msgstr ":py:obj:`START_CLIENT_ENTER `\\" +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`START_CLIENT_LEAVE `\\" -msgstr ":py:obj:`START_CLIENT_LEAVE `\\" +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 #, fuzzy -msgid ":py:obj:`START_SERVER_ENTER `\\" -msgstr ":py:obj:`START_SERVER_ENTER `\\" +msgid "secaggplus\\_mod" +msgstr "工作流程" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.start_client.rst:2 #, fuzzy -msgid ":py:obj:`START_SERVER_LEAVE `\\" -msgstr ":py:obj:`START_SERVER_LEAVE `\\" +msgid "start\\_client" +msgstr "启动客户端" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." msgstr "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +"服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " +"8080,则`server_address`应为`\"[::]:8080\"`。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. 
(default: None)" +msgstr "用于实例化客户端的可调用程序。(默认值:无)" + +#: flwr.client.app.start_client:9 of msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of #, fuzzy msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +"为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 `root_certificates` 为 " +"None,则使用系统证书。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " +"'rest': HTTP(实验性)" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:31 of #, fuzzy msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" -msgstr "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限制尝试次数。" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:35 of #, fuzzy msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" -msgstr "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." 
+msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无\",则总时间没有限制。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr ":py:obj:`DRIVER_CONNECT `\\" +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "使用不安全的服务器连接启动 gRPC 客户端:" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of #, fuzzy -msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr ":py:obj:`DRIVER_DISCONNECT `\\" +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of #, fuzzy -msgid ":py:obj:`START_DRIVER_ENTER `\\" -msgstr ":py:obj:`START_DRIVER_ENTER `\\" +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 #, fuzzy -msgid ":py:obj:`START_DRIVER_LEAVE `\\" -msgstr ":py:obj:`START_DRIVER_LEAVE `\\" +msgid "start\\_numpy\\_client" +msgstr "start_numpy_client" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_numpy_client:5 of #, fuzzy msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." msgstr "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`,并首先通过执行 " +":code:`to_client()`方法将 :code:`NumPyClient`转换为 :code:`flwr.client.Client`。" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "常见" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of #, fuzzy -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +msgid "Create Array from NumPy ndarray." +msgstr "将参数对象转换为 NumPy ndarrays。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." 
+msgstr "从字节反序列化 NumPy ndarray。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." +msgstr "配置将日志记录到文件和/或远程日志服务器。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +#, fuzzy +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +msgstr "将 create_event 提交给 ThreadPoolExecutor 以避免阻塞。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" -#: flwr.common.EventType.capitalize:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." -msgstr "更具体地说,让第一个字符大写,其余字符小写。" +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -#, fuzzy -msgid "Padding is done using the specified fill character (default is a space)." -msgstr "使用指定的填充字符(默认为空格)进行填充。" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "将 NumPy ndarray 序列化为字节。" -#: flwr.common.EventType.count:1 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." -msgstr "返回子串 sub 在字符串 S[start:end] 中非重叠出现的次数。 可选参数 start 和 end 按切分符号解释。" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" -#: flwr.common.EventType.encode:3 of -#, fuzzy -msgid "encoding" -msgstr "编码" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "将 NumPy ndarrays 转换为参数对象。" -#: flwr.common.EventType.encode:4 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "The encoding in which to encode the string." 
-msgstr "字符串的编码。" +msgid ":py:obj:`now `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: flwr.common.EventType.encode:9 of -#, fuzzy -msgid "errors" -msgstr "错误" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" -#: flwr.common.EventType.encode:6 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -"编码错误的错误处理方案。默认值为 \"strict\",即编码错误会引发 UnicodeEncodeError。 其他可能的值包括 " -"\"ignore\"、\"replace \"和 \"xmlcharrefreplace\",以及通过 codecs.register_error" -" 注册的、可处理 UnicodeEncodeErrror 的其他名称。" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" -#: flwr.common.EventType.endswith:1 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" msgstr "" -"如果 S 以指定后缀结束,则返回 True,否则返回 False。如果起始位置可选,则从该位置开始测试 S。如果使用可选的 " -"end,则在该位置停止比较 S。后缀也可以是要尝试的字符串元组。" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" -#: flwr.common.EventType.expandtabs:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of #, fuzzy -msgid "If tabsize is not given, a tab size of 8 characters is assumed." -msgstr "如果未给出制表符大小,则假定制表符大小为 8 个字符。" +msgid "Array type." +msgstr "返回类型" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." -msgstr "返回在 S 中找到子串 sub 的最低索引,即 sub 包含在 S[start:end] 中。 可选参数 start 和 end 按切分符号解释。" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +msgstr "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -#, fuzzy -msgid "Return -1 on failure." -msgstr "失败时返回-1。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "ClientMessage 是用于容纳一条结果信息的容器。" -#: flwr.common.EventType.format:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." 
-msgstr "使用来自 args 和 kwargs 的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr ":py:obj:`Code `\\ \\(value\\)" -#: flwr.common.EventType.format_map:1 of -#, fuzzy -msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." -msgstr "使用映射中的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "客户端状态代码。" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Raises ValueError when the substring is not found." -msgstr "如果未找到子串,则引发 ValueError。" +msgid ":py:obj:`Config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.EventType.isalnum:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." -msgstr "如果字符串中的所有字符都是字母数字,且字符串中至少有一个字符,则该字符串为字母数字字符串。" +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" -#: flwr.common.EventType.isalpha:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." -msgstr "如果字符串中的所有字符都是字母,并且字符串中至少有一个字符,那么该字符串就是字母字符串。" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.common.EventType.isascii:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy -msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." -msgstr "ASCII 字符的码位范围为 U+0000-U+007F。空字符串也是 ASCII 字符。" +msgid "Configs record." +msgstr "配置日志记录" -#: flwr.common.EventType.isdecimal:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." -msgstr "如果字符串中的所有字符都是十进制,并且字符串中至少有一个字符是十进制,那么该字符串就是十进制字符串。" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: flwr.common.EventType.isdigit:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of #, fuzzy -msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." -msgstr "如果字符串中的所有字符都是数字,并且字符串中至少有一个字符,那么该字符串就是数字字符串。" +msgid "Context of your run." +msgstr "您的运行状态。" -#: flwr.common.EventType.isidentifier:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." 
-msgstr "调用 keyword.iskeyword(s) 测试字符串 s 是否为保留标识符,如 \"def \"或 \"class\"。" +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" -#: flwr.common.EventType.islower:3 of -#, fuzzy -msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." -msgstr "如果字符串中的所有大小写字符都是小写,且字符串中至少有一个大小写字符,则该字符串为小写字符串。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "客户端向服务器发送 DisconnectRes 信息。" -#: flwr.common.EventType.isnumeric:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." -msgstr "如果字符串中的所有字符都是数字,且字符串中至少有一个字符,则该字符串为数字字符串。" +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -#: flwr.common.EventType.isprintable:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of #, fuzzy -msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." -msgstr "如果字符串的所有字符在 repr() 中都被认为是可打印的,或者字符串为空,那么该字符串就是可打印的。" +msgid "A dataclass that stores information about an error that occurred." +msgstr "数据类,用于存储所发生错误的相关信息。" -#: flwr.common.EventType.isspace:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." -msgstr "如果字符串中的所有字符都是空格,且字符串中至少有一个字符,则该字符串为空格。" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" +msgstr "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" -#: flwr.common.EventType.istitle:3 of -#, fuzzy -msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." -msgstr "在标题大小写字符串中,大写和标题大小写字符只能跟在无大小写字符之后,小写字符只能跟在有大小写字符之后。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "评估客户端的指示。" -#: flwr.common.EventType.isupper:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." -msgstr "如果字符串中所有带大小写的字符都是大写,并且字符串中至少有一个带大小写的字符,则该字符串为大写字符串。" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" +msgstr "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" -#: flwr.common.EventType.join:3 of -#, fuzzy -msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." -msgstr "方法被调用的字符串会被插入每个给定的字符串之间。结果将以新字符串的形式返回。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." 
+msgstr "评估客户端的反应。" -#: flwr.common.EventType.join:6 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -msgstr "示例:'.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +msgid ":py:obj:`EventType `\\ \\(value\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "遥测事件类型。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "If chars is given and not None, remove characters in chars instead." -msgstr "如果给定的是 chars 而不是 None,则删除 chars 中的字符。" +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -#: flwr.common.EventType.maketrans:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "为客户提供安装说明。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -"如果只有一个参数,则必须是一个将 Unicode 序号(整数)或字符映射到 Unicode 序号、字符串或 None " -"的字典。字符键将被转换为序号。如果有两个参数,它们必须是长度相等的字符串,在生成的字典中,x 中的每个字符将被映射到 y 中相同位置的字符。" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" -#: flwr.common.EventType.partition:3 of -#, fuzzy -msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." -msgstr "它会在字符串中搜索分隔符。 如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "来自客户端的合适回复。" -#: flwr.common.EventType.partition:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." -msgstr "如果找不到分隔符,则返回一个包含原始字符串和两个空字符串的 3 元组。" +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" -#: flwr.common.EventType.removeprefix:3 of -#, fuzzy -msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." -msgstr "如果字符串以前缀字符串开始,则返回 string[len(prefix):]。否则,返回原始字符串的副本。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "客户端的参数请求。" -#: flwr.common.EventType.removesuffix:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." 
-msgstr "如果字符串以后缀字符串结尾,且后缀不为空,则返回 string[:-len(suffix)]。否则,返回原始字符串的副本。" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" +msgstr "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" -#: flwr.common.EventType.replace:5 of -#, fuzzy -msgid "count" -msgstr "背景" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "要求返回参数时的响应。" -#: flwr.common.EventType.replace:4 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." -msgstr "要替换的最大出现次数。-1(默认值)表示替换所有出现次数。" +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" -#: flwr.common.EventType.replace:7 of -#, fuzzy -msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." -msgstr "如果给出可选参数 count,则只替换第一个计数出现的次数。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "客户端的属性请求。" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." -msgstr "返回在 S 中找到子串 sub 且 sub 包含在 S[start:end] 中的最高索引。 可选参数 start 和 end 按切分符号解释。" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" +msgstr "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" -#: flwr.common.EventType.rpartition:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "来自客户端的属性响应。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." -msgstr "它会从字符串的末尾开始搜索分隔符。如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" -#: flwr.common.EventType.rpartition:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of #, fuzzy -msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." -msgstr "如果找不到分隔符,则返回一个包含两个空字符串和原始字符串的 3 元组。" +msgid "State of your application from the viewpoint of the entity using it." +msgstr "从使用实体的角度看应用程序的状态。" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "sep" -msgstr "sep" +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr ":py:obj:`MessageType `\\ \\(\\)" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of #, fuzzy -msgid "The separator used to split the string." -msgstr "用于分割字符串的分隔符。" +msgid "Message type." 
+msgstr "信息类型。" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." -msgstr "当设置为 \"无\"(默认值)时,将对任何空白字符(包括 \\n \\r \\t \\f 和空格)进行分割,并从结果中剔除空字符串。" +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of #, fuzzy -msgid "maxsplit" -msgstr "最大分割" +msgid "Legacy message type." +msgstr "传统信息类型。" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." -msgstr "最大分割次数(从左边开始)。-1(默认值)表示没有限制。" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" -#: flwr.common.EventType.rsplit:13 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of #, fuzzy -msgid "Splitting starts at the end of the string and works to the front." -msgstr "从琴弦末端开始分弦,一直到琴弦前端。" +msgid "A dataclass holding metadata associated with the current message." +msgstr "数据类型,包含与当前报文相关的元数据。" -#: flwr.common.EventType.split:13 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using " -"the regular expression module." -msgstr "注意,str.split() 主要适用于有意分隔的数据。 对于包含标点符号的自然文本,可以考虑使用正则表达式模块。" +msgid ":py:obj:`Metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: flwr.common.EventType.splitlines:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." -msgstr "除非指定 keepends 为 true,否则换行符不会包含在生成的列表中。" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -#: flwr.common.EventType.startswith:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of #, fuzzy -msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." -msgstr "" -"如果 S 以指定的前缀开始,则返回 True,否则返回 False。如果选择 start,则从该位置开始测试 S。如果使用可选的 " -"end,则在该位置停止比较 S。" +msgid "Metrics recod." +msgstr "指标记录。" -#: flwr.common.EventType.title:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`NDArray `\\" +msgstr ":py:obj:`NDArray `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." 
-msgstr "更具体地说,单词以大写字母开头,其余所有大小写字符均为小写。" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -#: flwr.common.EventType.translate:5 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "table" -msgstr "数据库" +msgid ":py:obj:`NDArrays `\\" +msgstr ":py:obj:`NDArray `\\" -#: flwr.common.EventType.translate:4 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." -msgstr "翻译表,必须是 Unicode 序号到 Unicode 序号、字符串或无的映射。" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" +msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -#: flwr.common.EventType.translate:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -"表必须通过 __getitem__ 实现查找/索引,例如字典或列表。 如果该操作引发 LookupError,该字符将保持不变。 映射为 None" -" 的字符将被删除。" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" -#: flwr.common.EventType.zfill:3 of -#, fuzzy -msgid "The string is never truncated." -msgstr "字符串不会被截断。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "模型参数。" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "FitIns" -msgstr "FitIns" +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid "Parameters record." +msgstr "参数" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid ":py:obj:`Properties `\\" +msgstr ":py:obj:`properties `\\" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "FitRes" -msgstr "FitRes" +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." 
+msgstr "服务器发送给客户端的重新连接信息。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "RecordSet 可存储参数、指标和配置组。" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +msgstr "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "ServerMessage 是用于容纳一条指令信息的容器。" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "GetParametersIns" -msgstr "参数" +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." +msgstr "客户端状态。" + +#: ../../source/ref-api/flwr.common.Array.rst:2 #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid "Array" +msgstr "数组" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#: flwr.common.record.parametersrecord.Array:3 of #, fuzzy -msgid "GetParametersRes" -msgstr "参数" +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "数据类,包含数组类或张量类对象的序列化数据以及相关元数据。" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#: flwr.common.record.parametersrecord.Array:6 of #, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#: flwr.common.record.parametersrecord.Array:8 of #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或仅作为元数据字段使用。" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +#: flwr.common.record.parametersrecord.Array:12 of #, fuzzy -msgid "GetPropertiesIns" -msgstr "GetPropertiesIns" +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." 
+msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` 中的字节。" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +#: flwr.common.record.parametersrecord.Array:15 of #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid "A buffer of bytes containing the data." +msgstr "包含数据的字节缓冲区。" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +#: ../../source/ref-api/flwr.common.Array.rst:26::1 #, fuzzy -msgid "GetPropertiesRes" -msgstr "GetPropertiesRes" +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of #, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +msgid "Return the array as a NumPy array." +msgstr "以 NumPy ndarrays 列表形式返回模型参数" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid ":py:obj:`properties `\\" -msgstr ":py:obj:`properties `\\" +msgid ":py:obj:`dtype `\\" +msgstr ":py:obj:`dtype `\\" -#: ../../source/ref-api/flwr.common.Message.rst:2 +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "Message" -msgstr "服务器端" +msgid ":py:obj:`shape `\\" +msgstr "server.strategy.Strategy" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "A dataclass including information about the message to be executed." -msgstr "数据类型,包括要执行的信息的相关信息。" +msgid ":py:obj:`stype `\\" +msgstr "server.strategy.Strategy" -#: flwr.common.message.Message:5 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." -msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到该实体的记录。" +msgid ":py:obj:`data `\\" +msgstr ":py:obj:`data `\\" -#: flwr.common.message.Message:8 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy -msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." -msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" +msgid "ClientMessage" +msgstr "客户端" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" -msgstr "" -":py:obj:`create_error_reply `\\ " -"\\(error\\, ttl\\)" +msgid ":py:obj:`evaluate_res `\\" +msgstr ":py:obj:`evaluate_res `\\" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid "Construct a reply message indicating an error happened." -msgstr "构建一条回复信息,说明发生了错误。" +msgid ":py:obj:`fit_res `\\" +msgstr ":py:obj:`fit_res `\\" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" +":py:obj:`get_parameters_res " +"`\\" msgstr "" -":py:obj:`create_reply `\\ \\(content\\," -" ttl\\)" - -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -#, fuzzy -msgid "Create a reply to this message with specified content and TTL." 
-msgstr "以指定的内容和 TTL 创建对该信息的回复。" +":py:obj:`get_parameters_res " +"`\\" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid ":py:obj:`has_content `\\ \\(\\)" -msgstr ":py:obj:`has_content `\\ \\(\\)" +msgid "" +":py:obj:`get_properties_res " +"`\\" +msgstr "" +":py:obj:`get_properties_res " +"`\\" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of +#: ../../source/ref-api/flwr.common.Code.rst:2 #, fuzzy -msgid "Return True if message has content, else False." -msgstr "如果信息有内容,则返回 True,否则返回 False。" +msgid "Code" +msgstr "代码" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.typing.Code:1 of #, fuzzy -msgid ":py:obj:`has_error `\\ \\(\\)" -msgstr ":py:obj:`has_error `\\ \\(\\)" +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`~enum.Enum`" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid "Return True if message has an error, else False." -msgstr "如果信息有错误,则返回 True,否则返回 False。" +msgid ":py:obj:`OK `\\" +msgstr ":py:obj:`OK `\\" -#: flwr.common.Message.content:1::1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid ":py:obj:`content `\\" -msgstr ":py:obj:`content `\\" +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid "The content of this message." -msgstr "评估客户端的反应。" +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" -#: flwr.common.Message.content:1::1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid ":py:obj:`error `\\" -msgstr ":py:obj:`error `\\" +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid "Error captured by this message." -msgstr "该信息捕捉到的错误。" +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" -#: flwr.common.Message.content:1::1 of +#: ../../source/ref-api/flwr.common.Config.rst:2 #, fuzzy -msgid ":py:obj:`metadata `\\" -msgstr ":py:obj:`metadata `\\" +msgid "Config" +msgstr "配置日志记录" -#: flwr.common.message.Message.create_error_reply:3 of +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 #, fuzzy -msgid "The error that was encountered." -msgstr "遇到的错误。" +msgid "ConfigsRecord" +msgstr "配置日志记录" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. 
This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" msgstr "" -"该信息的有效时间(秒)。如果未设置,则将根据收到的信息过期前的剩余时间来设置。其计算公式为:ttl = msg.meta.ttl - " -"(reply.meta.created_at - msg.meta.created_at)" - -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of -#, fuzzy +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" + +#: flwr.common.record.configsrecord.ConfigsRecord:3 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" -msgstr "该信息的有效时间(秒)。如果未设置,则将根据接收到的信息过期前的剩余时间来设置。其计算公式如下" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -#, fuzzy -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." +msgstr "" -#: flwr.common.message.Message.create_reply:3 of -#, fuzzy +#: flwr.common.record.configsrecord.ConfigsRecord:13 of msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." msgstr "" -"该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " -"\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " -"\"reply_to_message \"设置为该消息的 ID。" -#: flwr.common.message.Message.create_reply:7 of -#, fuzzy -msgid "The content for the reply message." -msgstr "回复信息的内容。" +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. 
All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." +msgstr "" -#: flwr.common.message.Message.create_reply:16 of -#, fuzzy -msgid "A new `Message` instance representing the reply." -msgstr "代表回复的新的 `Message` 实例。" +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -#, fuzzy -msgid "MessageType" -msgstr "返回类型" +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`EVALUATE `\\" -msgstr ":py:obj:`EVALUATE `\\" +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`QUERY `\\" -msgstr ":py:obj:`QUERY `\\" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of #, fuzzy -msgid ":py:obj:`TRAIN `\\" -msgstr ":py:obj:`TRAIN `\\" +msgid "Return number of Bytes stored in this object." +msgstr "返回存储在此对象中的字节数。" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "MessageTypeLegacy" -msgstr "MessageTypeLegacy" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`GET_PARAMETERS `\\" -msgstr ":py:obj:`GET_PARAMETERS `\\" +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`GET_PROPERTIES `\\" -msgstr ":py:obj:`GET_PROPERTIES `\\" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "An identifier for the current run." -msgstr "当前运行的标识符。" +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of #, fuzzy -msgid "An identifier for the current message." -msgstr "当前信息的标识符。" +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
+msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "An identifier for the node sending this message." -msgstr "发送此信息的节点的标识符。" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -#, fuzzy -msgid "An identifier for the node receiving this message." -msgstr "接收此信息的节点的标识符。" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "An identifier for the message this message replies to." -msgstr "该信息回复的信息的标识符。" +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.common.message.Metadata:13 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." -msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.common.message.Metadata:16 of -#, fuzzy -msgid "Time-to-live for this message in seconds." -msgstr "该信息的有效时间。" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "A string that encodes the action to be executed on the receiving end." -msgstr "编码接收端要执行的操作的字符串。" +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: flwr.common.message.Metadata:21 of +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of #, fuzzy -msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." -msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" +msgid "This function counts booleans as occupying 1 Byte." +msgstr "该函数将布尔值计算为占用 1 个字节。" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:2 #, fuzzy -msgid ":py:obj:`created_at `\\" -msgstr ":py:obj:`ttl `\\" +msgid "Context" +msgstr "背景" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.context.Context:3 of #, fuzzy -msgid "Unix timestamp when the message was created." -msgstr "创建信息时的 Unix 时间戳。" +msgid "The ID that identifies the node." +msgstr "错误的标识符。" -#: flwr.common.Metadata.created_at:1::1 of -#, fuzzy -msgid ":py:obj:`dst_node_id `\\" -msgstr ":py:obj:`dst_node_id `\\" +#: flwr.common.context.Context:5 of +msgid "" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." 
+msgstr "" -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.context.Context:8 of #, fuzzy -msgid ":py:obj:`group_id `\\" -msgstr ":py:obj:`group_id `\\" +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存储器,在实体生命周期的不同阶段(如多轮)进行访问。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "An identifier for grouping messages." -msgstr "用于分组信息的标识符。" +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`message_id `\\" -msgstr ":py:obj:`message_id `\\" +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`message_type `\\" -msgstr ":py:obj:`message_type `\\" +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`partition_id `\\" -msgstr ":py:obj:`partition_id `\\" +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 #, fuzzy -msgid "An identifier telling which data partition a ClientApp should use." -msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" +msgid "DisconnectRes" +msgstr "断开Res" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 #, fuzzy -msgid ":py:obj:`reply_to_message `\\" -msgstr ":py:obj:`reply_to_message `\\" +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Error.rst:2 #, fuzzy -msgid ":py:obj:`run_id `\\" -msgstr ":py:obj:`run_id `\\" +msgid "Error" +msgstr "错误" -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.message.Error:3 of #, fuzzy -msgid ":py:obj:`src_node_id `\\" -msgstr ":py:obj:`src_node_id `\\" +msgid "An identifier for the error." +msgstr "错误的标识符。" -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.message.Error:5 of #, fuzzy -msgid ":py:obj:`ttl `\\" -msgstr ":py:obj:`ttl `\\" +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "出错原因(如异常堆栈跟踪)" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of +#: flwr.common.Error.code:1::1 of #, fuzzy -msgid "Time-to-live for this message." -msgstr "该信息的有效时间。" +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of #, fuzzy -msgid "MetricsRecord" -msgstr "MetricsRecord" +msgid "Error code." 
+msgstr "错误代码。" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: flwr.common.Error.code:1::1 of #, fuzzy -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" -msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " -":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " -":py:class:`~typing.List`\\ [:py:class:`float`]]]" +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of #, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +msgid "Reason reported about the error." +msgstr "报告的错误原因。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid "EvaluateIns" +msgstr "说明" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 #, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 #, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 #, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +msgid "EvaluateRes" +msgstr "评估Res" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +msgid ":py:obj:`loss `\\" +msgstr ":py:obj:`loss `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "NDArray" -msgstr "NDArray" +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: ../../source/ref-api/flwr.common.EventType.rst:2 #, fuzzy -msgid ":py:obj:`tensors `\\" -msgstr ":py:obj:`tensors `\\" +msgid "EventType" +msgstr "返回类型" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: flwr.common.telemetry.EventType:1 of #, fuzzy -msgid ":py:obj:`tensor_type `\\" -msgstr ":py:obj:`tensor_type `\\" +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -#: 
../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "ParametersRecord" -msgstr "参数" +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of #, fuzzy -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" -msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgid "Encode the string using the codec registered for encoding." +msgstr "使用注册的编码解码器对字符串进行编码。" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." -msgstr "" -"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] 的形式保存条目。ParametersRecord " -"对象相当于 PyTorch 的 state_dict,但它保存的是序列化的张量。" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of #, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +msgid "Return a copy with all occurrences of substring old replaced by new." +msgstr "返回用 new 替换子串 old 的所有出现次数的副本。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." +msgstr "使用 sep 作为分隔符,返回字符串中的子字符串列表。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of #, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgid "Concatenate any number of strings." 
+msgstr "连接任意数量的字符串。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of #, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +msgid "Return a capitalized version of the string." +msgstr "返回字符串的大写版本。" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." -msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy 数组)的元数据相对应,需要进行去eralization。" +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of #, fuzzy -msgid "ReconnectIns" -msgstr "启用 SSL 连接" +msgid "Return a version of the string suitable for caseless comparisons." +msgstr "返回适合无例比较的字符串版本。" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`seconds `\\" -msgstr ":py:obj:`seconds `\\" +msgid ":py:obj:`title `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of #, fuzzy -msgid "RecordSet" -msgstr "RecordSet" +msgid "Return a version of the string where each word is titlecased." +msgstr "返回字符串的版本,其中每个单词都使用了标题大小写。" -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`configs_records `\\" -msgstr ":py:obj:`configs_records `\\" +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of #, fuzzy -msgid "Dictionary holding ConfigsRecord instances." -msgstr "包含 ConfigsRecord 实例的字典。" +msgid "Return a centered string of length width." +msgstr "返回客户端的属性集。" -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`metrics_records `\\" -msgstr ":py:obj:`metrics_records `\\" +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Dictionary holding MetricsRecord instances." -msgstr "保存 MetricsRecord 实例的字典。" +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." 
+msgstr "返回子字符串 sub 在字符串 S[start:end] 中非重叠出现的次数。" -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`parameters_records `\\" -msgstr ":py:obj:`parameters_records `\\" +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of #, fuzzy -msgid "Dictionary holding ParametersRecord instances." -msgstr "存放 ParametersRecord 实例的字典。" +msgid "Return a copy where all tab characters are expanded using spaces." +msgstr "返回使用空格扩展所有制表符的副本。" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "ServerMessage" -msgstr "服务器端" +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`evaluate_ins `\\" -msgstr ":py:obj:`evaluate_ins `\\" +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." +msgstr "返回在 S 中找到子串 sub 的最低索引,且 sub 包含在 S[start:end] 中。" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`fit_ins `\\" -msgstr ":py:obj:`fit_ins `\\" +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition_id `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of #, fuzzy -msgid "" -":py:obj:`get_parameters_ins " -"`\\" -msgstr "" -":py:obj:`get_parameters_ins " -"`\\" +msgid "Partition the string into three parts using the given separator." +msgstr "使用给定的分隔符将字符串分为三部分。" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`get_properties_ins " -"`\\" -msgstr "" -":py:obj:`get_properties_ins " -"`\\" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: ../../source/ref-api/flwr.common.Status.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Status" -msgstr "客户端状态。" +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -#, fuzzy -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" - -#: ../../source/ref-api/flwr.common.Status.rst:29::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of #, fuzzy -msgid ":py:obj:`message `\\" -msgstr ":py:obj:`message `\\" +msgid "Return a left-justified string of length width." 
+msgstr "返回长度为 width 的左对齐字符串。" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "array\\_from\\_numpy" -msgstr "array\\_from\\_numpy" +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of #, fuzzy -msgid "bytes\\_to\\_ndarray" -msgstr "bytes\\_to\\_ndarray" +msgid "Return a copy of the string converted to lowercase." +msgstr "返回转换为小写的字符串副本。" -#: ../../source/ref-api/flwr.common.configure.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "configure" -msgstr "配置日志记录" +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.common.event.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of #, fuzzy -msgid "event" -msgstr "事件" +msgid "Return a copy of the string with leading whitespace removed." +msgstr "返回去掉前导空白的字符串副本。" -#: ../../source/ref-api/flwr.common.log.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "log" -msgstr "登录" - -#: logging.Logger.log:3 of msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." -msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" - -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "ndarray\\_to\\_bytes" -msgstr "ndarray\\_to\\_bytes" +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." +msgstr "返回在 S 中找到子串 sub 的最高索引,且 sub 包含在 S[start:end] 中。" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "ndarrays\\_to\\_parameters" -msgstr "ndarrays\\_to\\_parameters" +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: ../../source/ref-api/flwr.common.now.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "now" -msgstr "现在" +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of #, fuzzy -msgid "parameters\\_to\\_ndarrays" -msgstr "parameters\\_to\\_ndarrays" +msgid "Return a right-justified string of length width." 
+msgstr "返回长度为 width 的右对齐字符串。" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" -msgstr "服务器" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of #, fuzzy -msgid ":py:obj:`run_server_app `\\ \\(\\)" -msgstr ":py:obj:`run_server_app `\\ \\(\\)" +msgid "Return a copy of the string with trailing whitespace removed." +msgstr "返回去掉尾部空白的字符串副本。" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Run Flower server app." -msgstr "Flower 服务器。" +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition_id `\\" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`run_superlink `\\ \\(\\)" -msgstr ":py:obj:`run_superlink `\\ \\(\\)" +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of #, fuzzy -msgid "Run Flower SuperLink (Driver API and Fleet API)." -msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" +msgid "Return a list of the lines in the string, breaking at line boundaries." +msgstr "返回字符串中的行列表,以行为分界线。" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" -msgstr "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." -msgstr "使用 gRPC 传输层启动 Flower 服务器。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +#, fuzzy +msgid "Return a copy of the string with leading and trailing whitespace removed." +msgstr "返回去掉前导和尾部空白的字符串副本。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`ClientManager `\\ \\(\\)" -msgstr ":py:obj:`ClientManager `\\ \\(\\)" +msgid ":py:obj:`swapcase `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of #, fuzzy -msgid "Abstract base class for managing Flower clients." -msgstr "Flower 客户端的抽象基类。" +msgid "" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." 
+msgstr "将大写字母转换为小写字母,将小写字母转换为大写字母。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`Driver `\\ \\(\\)" -msgstr ":py:obj:`run_driver_api `\\ \\(\\)" +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of #, fuzzy -msgid "Abstract base Driver class for the Driver API." -msgstr "Flower 客户端的抽象基类。" +msgid "Replace each character in the string using the given translation table." +msgstr "使用给定的翻译表替换字符串中的每个字符。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`History `\\ \\(\\)" -msgstr ":py:obj:`History `\\ \\(\\)" +msgid ":py:obj:`upper `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of #, fuzzy -msgid "History class for training and/or evaluation metrics collection." -msgstr "**hist** -- 包含训练和评估指标的对象。" +msgid "Return a copy of the string converted to uppercase." +msgstr "返回转换为大写字符串的副本。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" -msgstr "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Legacy Context." -msgstr "传承背景。" +msgid "Return True if S starts with the specified prefix, False otherwise." +msgstr "如果 S 以指定前缀开头,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" -msgstr "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid "Return True if S ends with the specified suffix, False otherwise." +msgstr "如果 S 以指定后缀结束,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" -msgstr "server.strategy.Strategy" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of #, fuzzy -msgid "Flower ServerApp." -msgstr "Flower 服务器。" +msgid "Return a str with the given prefix string removed if present." 
+msgstr "返回一个字符串,如果存在,则去掉给定的前缀字符串。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of #, fuzzy -msgid "Flower server config." -msgstr "Flower 服务器。" +msgid "Return a str with the given suffix string removed if present." +msgstr "返回一个字符串,如果存在给定的后缀字符串,则将其删除。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" -msgstr ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of #, fuzzy -msgid "Provides a pool of available clients." -msgstr "使用部分可用客户进行评估。" +msgid "Return True if all characters in the string are ASCII, False otherwise." +msgstr "如果字符串中的所有字符都是 ASCII 码,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`flwr.server.strategy `\\" -msgstr "server.strategy.Strategy" - -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." -msgstr "包含策略抽象和不同的实现方法。" +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of #, fuzzy -msgid ":py:obj:`flwr.server.workflow `\\" -msgstr "server.strategy.Strategy" +msgid "Return True if the string is a lowercase string, False otherwise." +msgstr "如果字符串是小写字符串,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Workflows." -msgstr "工作流程" - -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -#, fuzzy -msgid "ClientManager" -msgstr "客户端" +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of #, fuzzy -msgid ":py:obj:`all `\\ \\(\\)" -msgstr ":py:obj:`all `\\ \\(\\)" +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "如果字符串是大写字符串,则返回 True,否则返回 False。" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Return all available clients." 
-msgstr "返回所有可用客户。" +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of #, fuzzy -msgid ":py:obj:`num_available `\\ \\(\\)" -msgstr ":py:obj:`num_available `\\ \\(\\)" +msgid "Return True if the string is a title-cased string, False otherwise." +msgstr "如果字符串是带标题的字符串,则返回 True,否则返回 False。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Return the number of available clients." -msgstr "返回样本大小和所需的可用客户数量。" +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of #, fuzzy -msgid ":py:obj:`register `\\ \\(client\\)" -msgstr ":py:obj:`register `\\ \\(client\\)" +msgid "Return True if the string is a whitespace string, False otherwise." +msgstr "如果字符串是空白字符串,则返回 True,否则返回 False。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Register Flower ClientProxy instance." -msgstr "注册 Flower ClientProxy 实例。" +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of #, fuzzy -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -msgstr "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgid "Return True if the string is a decimal string, False otherwise." +msgstr "如果字符串是十进制字符串,则返回 True,否则返回 False。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Sample a number of Flower ClientProxy instances." -msgstr "取样若干 Flower ClientProxy 实例。" +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of #, fuzzy -msgid ":py:obj:`unregister `\\ \\(client\\)" -msgstr ":py:obj:`unregister `\\ \\(client\\)" +msgid "Return True if the string is a digit string, False otherwise." +msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Unregister Flower ClientProxy instance." 
-msgstr "取消注册 Flower ClientProxy 实例。" +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of #, fuzzy -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" -msgstr "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +msgid "Return True if the string is a numeric string, False otherwise." +msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Wait until at least `num_clients` are available." -msgstr "等待至少 `num_clients` 可用。" +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of #, fuzzy -msgid "**num_available** -- The number of currently available clients." -msgstr "**num_available** -- 当前可用客户端的数量。" +msgid "Return True if the string is an alphabetic string, False otherwise." +msgstr "如果字符串是字母字符串,则返回 True,否则返回 False。" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." -msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 False。" +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of #, fuzzy -msgid "This method is idempotent." -msgstr "这种方法是幂等的。" +msgid "Return True if the string is an alpha-numeric string, False otherwise." +msgstr "如果字符串是字母数字字符串,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.Driver.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Driver" -msgstr "服务器" +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of #, fuzzy -msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" -msgstr "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\)" +msgid "Return True if the string is a valid Python identifier, False otherwise." +msgstr "如果字符串是有效的 Python 标识符,则返回 True,否则返回 False。" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Create a new message with specified parameters." 
-msgstr "使用指定参数创建新信息。" +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of #, fuzzy -msgid ":py:obj:`get_node_ids `\\ \\(\\)" -msgstr ":py:obj:`get_node_ids `\\ \\(\\)" +msgid "Return True if the string is printable, False otherwise." +msgstr "如果字符串可打印,则返回 True,否则返回 False。" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Get node IDs." -msgstr "获取节点 ID。" +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of #, fuzzy msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" -msgstr "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" - -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -#, fuzzy -msgid "Pull messages based on message IDs." -msgstr "根据信息 ID 提取信息。" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." +msgstr "在数字字符串左侧填充零,以填满给定宽度的字段。" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Push messages to specified node IDs." -msgstr "向指定的节点 ID 推送信息。" +msgid "Return a formatted version of S, using substitutions from args and kwargs." +msgstr "使用 args 和 kwargs 的替换,返回 S 的格式化版本。" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Push messages to specified node IDs and pull the reply messages." -msgstr "向指定的节点 ID 推送信息并提取回复信息。" +msgid "Return a formatted version of S, using substitutions from mapping." +msgstr "使用映射中的替换,返回 S 的格式化版本。" -#: flwr.server.driver.driver.Driver.create_message:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." 
-msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id \"将自动设置。" +msgid ":py:obj:`maketrans `\\" +msgstr ":py:obj:`TRAIN `\\" -#: flwr.server.driver.driver.Driver.create_message:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of #, fuzzy -msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." -msgstr "新信息的内容。其中包含要发送到目的节点的记录。" +msgid "Return a translation table usable for str.translate()." +msgstr "返回可用于 str.translate() 的翻译表。" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." -msgstr "信息类型,定义接收端要执行的操作。" +msgid ":py:obj:`PING `\\" +msgstr ":py:obj:`PING `\\" -#: flwr.server.driver.driver.Driver.create_message:12 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "The ID of the destination node to which the message is being sent." -msgstr "信息发送目的地节点的 ID。" +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr ":py:obj:`START_CLIENT_ENTER `\\" -#: flwr.server.driver.driver.Driver.create_message:14 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." -msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr ":py:obj:`START_CLIENT_LEAVE `\\" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." -msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回复被视为有效的持续时间。" +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr ":py:obj:`START_SERVER_ENTER `\\" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." -msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr ":py:obj:`START_SERVER_LEAVE `\\" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." -msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" - -#: flwr.server.driver.driver.Driver.pull_messages:6 of -#, fuzzy -msgid "An iterable of message IDs for which reply messages are to be retrieved." -msgstr "要检索回复信息的信息 ID 的可迭代项。" - -#: flwr.server.driver.driver.Driver.pull_messages:9 of -#, fuzzy -msgid "**messages** -- An iterable of messages received." -msgstr "**messages** -- 收到的信息迭代。" +":py:obj:`START_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: flwr.server.driver.driver.Driver.push_messages:3 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." 
-msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" - -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -#, fuzzy -msgid "An iterable of messages to be sent." -msgstr "要发送的信息迭代。" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." -msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." -msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到所有回复或超过指定的超时时间。" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." -msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\",则没有时间限制,该方法将等待直到收到所有信息的回复。" - -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -#, fuzzy -msgid "**replies** -- An iterable of reply messages received from the SuperLink." -msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" msgstr "" -"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设置了 " -"`timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直有效,不受 `timeout` 影响。" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.History.rst:2 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "History" -msgstr "历史" +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" msgstr "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." 
-msgstr "集中评估" +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Add one loss entry (from distributed evaluation)." -msgstr "增加一个损失条目(来自分布式评估)。" +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" msgstr "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Add metrics entries (from centralized evaluation)." -msgstr "集中评估" +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Add metrics entries (from distributed evaluation)." -msgstr "定制的集中/分布式评估" +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of +#: flwr.common.EventType.capitalize:3 of #, fuzzy -msgid "Add metrics entries (from distributed fit)." -msgstr "添加度量条目(来自分布式拟合)。" +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." +msgstr "更具体地说,让第一个字符大写,其余字符小写。" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of #, fuzzy -msgid "LegacyContext" -msgstr "遗留上下文" +msgid "Padding is done using the specified fill character (default is a space)." 
+msgstr "使用指定的填充字符(默认为空格)进行填充。" -#: flwr.server.compat.legacy_context.LegacyContext:1 of +#: flwr.common.EventType.count:1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.common.context.Context`" -msgstr "Bases: :py:class:`~flwr.common.context.Context`" +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." +msgstr "返回子串 sub 在字符串 S[start:end] 中非重叠出现的次数。 可选参数 start 和 end 按切分符号解释。" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: flwr.common.EventType.encode:3 of #, fuzzy -msgid ":py:obj:`config `\\" -msgstr "server.strategy.Strategy" +msgid "encoding" +msgstr "编码" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: flwr.common.EventType.encode:4 of #, fuzzy -msgid ":py:obj:`strategy `\\" -msgstr "server.strategy.Strategy" +msgid "The encoding in which to encode the string." +msgstr "字符串的编码。" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: flwr.common.EventType.encode:9 of #, fuzzy -msgid ":py:obj:`client_manager `\\" -msgstr ":py:obj:`client_manager `\\" +msgid "errors" +msgstr "错误" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: flwr.common.EventType.encode:6 of #, fuzzy -msgid ":py:obj:`history `\\" -msgstr "server.strategy.Strategy" - -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -#, fuzzy -msgid ":py:obj:`state `\\" -msgstr "server.strategy.Strategy" - -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" -msgstr "服务器" +msgid "" +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." +msgstr "" +"编码错误的错误处理方案。默认值为 \"strict\",即编码错误会引发 UnicodeEncodeError。 其他可能的值包括 " +"\"ignore\"、\"replace \"和 \"xmlcharrefreplace\",以及通过 codecs.register_error" +" 注册的、可处理 UnicodeEncodeErrror 的其他名称。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.endswith:1 of #, fuzzy -msgid ":py:obj:`client_manager `\\ \\(\\)" -msgstr ":py:obj:`client_manager `\\ \\(\\)" +msgid "" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." +msgstr "" +"如果 S 以指定后缀结束,则返回 True,否则返回 False。如果起始位置可选,则从该位置开始测试 S。如果使用可选的 " +"end,则在该位置停止比较 S。后缀也可以是要尝试的字符串元组。" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.expandtabs:3 of #, fuzzy -msgid "Return ClientManager." -msgstr "返回客户端(本身)。" +msgid "If tabsize is not given, a tab size of 8 characters is assumed." +msgstr "如果未给出制表符大小,则假定制表符大小为 8 个字符。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of #, fuzzy msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" -msgstr "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." 
+msgstr "返回在 S 中找到子串 sub 的最低索引,即 sub 包含在 S[start:end] 中。 可选参数 start 和 end 按切分符号解释。" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of #, fuzzy -msgid "Send shutdown signal to all clients." -msgstr "向所有客户端发送关闭信号。" +msgid "Return -1 on failure." +msgstr "失败时返回-1。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.format:1 of #, fuzzy msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" -msgstr "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -#, fuzzy -msgid "Validate current global model on a number of clients." -msgstr "当前(全局)模型参数。" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." +msgstr "使用来自 args 和 kwargs 的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.format_map:1 of #, fuzzy -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" -msgstr ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." +msgstr "使用映射中的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of #, fuzzy -msgid "Run federated averaging for a number of rounds." -msgstr "联邦平均动量策略。" +msgid "Raises ValueError when the substring is not found." +msgstr "如果未找到子串,则引发 ValueError。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.isalnum:3 of #, fuzzy msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" -msgstr "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." +msgstr "如果字符串中的所有字符都是字母数字,且字符串中至少有一个字符,则该字符串为字母数字字符串。" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of +#: flwr.common.EventType.isalpha:3 of #, fuzzy -msgid "Perform a single round of federated averaging." -msgstr "本轮联邦学习。" +msgid "" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是字母,并且字符串中至少有一个字符,那么该字符串就是字母字符串。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.isascii:3 of #, fuzzy msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" -msgstr "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "ASCII 字符的码位范围为 U+0000-U+007F。空字符串也是 ASCII 字符。" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of +#: flwr.common.EventType.isdecimal:3 of #, fuzzy -msgid "Set the max_workers used by ThreadPoolExecutor." -msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." 
+msgstr "如果字符串中的所有字符都是十进制,并且字符串中至少有一个字符是十进制,那么该字符串就是十进制字符串。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.isdigit:3 of #, fuzzy -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" -msgstr ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是数字,并且字符串中至少有一个字符,那么该字符串就是数字字符串。" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of +#: flwr.common.EventType.isidentifier:3 of #, fuzzy -msgid "Replace server strategy." -msgstr "server.strategy" +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." +msgstr "调用 keyword.iskeyword(s) 测试字符串 s 是否为保留标识符,如 \"def \"或 \"class\"。" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#: flwr.common.EventType.islower:3 of #, fuzzy -msgid "ServerApp" -msgstr "服务器" +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." +msgstr "如果字符串中的所有大小写字符都是小写,且字符串中至少有一个大小写字符,则该字符串为小写字符串。" -#: flwr.server.server_app.ServerApp:5 of +#: flwr.common.EventType.isnumeric:3 of #, fuzzy -msgid "Use the `ServerApp` with an existing `Strategy`:" -msgstr "使用现有策略" +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." +msgstr "如果字符串中的所有字符都是数字,且字符串中至少有一个字符,则该字符串为数字字符串。" -#: flwr.server.server_app.ServerApp:15 of +#: flwr.common.EventType.isprintable:3 of #, fuzzy -msgid "Use the `ServerApp` with a custom main function:" -msgstr "使用带有自定义主函数的 `ServerApp`:" +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." +msgstr "如果字符串的所有字符在 repr() 中都被认为是可打印的,或者字符串为空,那么该字符串就是可打印的。" -#: flwr.server.server_app.ServerApp.main:1::1 of +#: flwr.common.EventType.isspace:3 of #, fuzzy -msgid ":py:obj:`main `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是空格,且字符串中至少有一个字符,则该字符串为空格。" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of +#: flwr.common.EventType.istitle:3 of #, fuzzy -msgid "Return a decorator that registers the main fn with the server app." -msgstr "返回向服务器应用程序注册 main fn 的装饰器。" +msgid "" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." +msgstr "在标题大小写字符串中,大写和标题大小写字符只能跟在无大小写字符之后,小写字符只能跟在有大小写字符之后。" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#: flwr.common.EventType.isupper:3 of #, fuzzy -msgid "ServerConfig" -msgstr "服务器" +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." +msgstr "如果字符串中所有带大小写的字符都是大写,并且字符串中至少有一个带大小写的字符,则该字符串为大写字符串。" -#: flwr.server.server_config.ServerConfig:3 of +#: flwr.common.EventType.join:3 of #, fuzzy msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." -msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." 
+msgstr "方法被调用的字符串会被插入每个给定的字符串之间。结果将以新字符串的形式返回。" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#: flwr.common.EventType.join:6 of #, fuzzy -msgid ":py:obj:`num_rounds `\\" -msgstr ":py:obj:`num_rounds `\\" +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +msgstr "示例:'.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of #, fuzzy -msgid ":py:obj:`round_timeout `\\" -msgstr ":py:obj:`round_timeout `\\" +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "如果给定的是 chars 而不是 None,则删除 chars 中的字符。" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +#: flwr.common.EventType.maketrans:3 of #, fuzzy -msgid "SimpleClientManager" -msgstr "SimpleClientManager" +msgid "" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." +msgstr "" +"如果只有一个参数,则必须是一个将 Unicode 序号(整数)或字符映射到 Unicode 序号、字符串或 None " +"的字典。字符键将被转换为序号。如果有两个参数,它们必须是长度相等的字符串,在生成的字典中,x 中的每个字符将被映射到 y 中相同位置的字符。" -#: flwr.server.client_manager.SimpleClientManager:1 of +#: flwr.common.EventType.partition:3 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" -msgstr "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." +msgstr "它会在字符串中搜索分隔符。 如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.partition:7 of #, fuzzy -msgid ":py:obj:`all `\\ \\(\\)" -msgstr ":py:obj:`all `\\ \\(\\)" +msgid "" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." +msgstr "如果找不到分隔符,则返回一个包含原始字符串和两个空字符串的 3 元组。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.removeprefix:3 of #, fuzzy msgid "" -":py:obj:`num_available `\\" -" \\(\\)" -msgstr "" -":py:obj:`num_available `\\" -" \\(\\)" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." +msgstr "如果字符串以前缀字符串开始,则返回 string[len(prefix):]。否则,返回原始字符串的副本。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.removesuffix:3 of #, fuzzy msgid "" -":py:obj:`register `\\ " -"\\(client\\)" -msgstr "" -":py:obj:`register `\\ " -"\\(client\\)" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." 
+msgstr "如果字符串以后缀字符串结尾,且后缀不为空,则返回 string[:-len(suffix)]。否则,返回原始字符串的副本。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.replace:5 of #, fuzzy -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -msgstr "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgid "count" +msgstr "背景" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.replace:4 of #, fuzzy msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" -msgstr "" -":py:obj:`unregister `\\ " -"\\(client\\)" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." +msgstr "要替换的最大出现次数。-1(默认值)表示替换所有出现次数。" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.replace:7 of #, fuzzy msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" -msgstr "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +"If the optional argument count is given, only the first count occurrences" +" are replaced." +msgstr "如果给出可选参数 count,则只替换第一个计数出现的次数。" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of #, fuzzy msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." -msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "返回在 S 中找到子串 sub 且 sub 包含在 S[start:end] 中的最高索引。 可选参数 start 和 end 按切分符号解释。" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +#: flwr.common.EventType.rpartition:3 of #, fuzzy -msgid "The number of clients to wait for." -msgstr "需要等待的客户数量。" +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." +msgstr "它会从字符串的末尾开始搜索分隔符。如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +#: flwr.common.EventType.rpartition:7 of #, fuzzy -msgid "The time in seconds to wait for, defaults to 86400 (24h)." -msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." +msgstr "如果找不到分隔符,则返回一个包含两个空字符串和原始字符串的 3 元组。" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of #, fuzzy -msgid "**success**" -msgstr "**success**" +msgid "sep" +msgstr "sep" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of #, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" +msgid "The separator used to split the string." +msgstr "用于分割字符串的分隔符。" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of #, fuzzy -msgid "run\\_fleet\\_api" -msgstr "run\\_fleet\\_api" +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." 
+msgstr "当设置为 \"无\"(默认值)时,将对任何空白字符(包括 \\n \\r \\t \\f 和空格)进行分割,并从结果中剔除空字符串。" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of #, fuzzy -msgid "run\\_server\\_app" -msgstr "run\\_server\\_app" +msgid "maxsplit" +msgstr "最大分割" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of #, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." +msgstr "最大分割次数(从左边开始)。-1(默认值)表示没有限制。" -#: ../../source/ref-api/flwr.server.start_server.rst:2 +#: flwr.common.EventType.rsplit:13 of #, fuzzy -msgid "start\\_server" -msgstr "server.start_server" - -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." -msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +msgid "Splitting starts at the end of the string and works to the front." +msgstr "从琴弦末端开始分弦,一直到琴弦前端。" -#: flwr.server.app.start_server:5 of +#: flwr.common.EventType.split:13 of +#, fuzzy msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." -msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "注意,str.split() 主要适用于有意分隔的数据。 对于包含标点符号的自然文本,可以考虑使用正则表达式模块。" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: flwr.common.EventType.splitlines:3 of +#, fuzzy msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." -msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." +msgstr "除非指定 keepends 为 true,否则换行符不会包含在生成的列表中。" -#: flwr.server.app.start_server:12 of +#: flwr.common.EventType.startswith:1 of +#, fuzzy msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" +"如果 S 以指定的前缀开始,则返回 True,否则返回 False。如果选择 start,则从该位置开始测试 S。如果使用可选的 " +"end,则在该位置停止比较 S。" -#: flwr.server.app.start_server:16 of +#: flwr.common.EventType.title:3 of +#, fuzzy msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." -msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." 
+msgstr "更具体地说,单词以大写字母开头,其余所有大小写字符均为小写。" -#: flwr.server.app.start_server:21 of +#: flwr.common.EventType.translate:5 of +#, fuzzy +msgid "table" +msgstr "数据库" + +#: flwr.common.EventType.translate:4 of +#, fuzzy msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." -msgstr "" -"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." +msgstr "翻译表,必须是 Unicode 序号到 Unicode 序号、字符串或无的映射。" -#: flwr.server.app.start_server:28 of +#: flwr.common.EventType.translate:7 of +#, fuzzy msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." msgstr "" -"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " -"服务器私钥。" +"表必须通过 __getitem__ 实现查找/索引,例如字典或列表。 如果该操作引发 LookupError,该字符将保持不变。 映射为 None" +" 的字符将被删除。" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" +#: flwr.common.EventType.zfill:3 of +#, fuzzy +msgid "The string is never truncated." +msgstr "字符串不会被截断。" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." -msgstr "CA 证书。" +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +#, fuzzy +msgid "FitIns" +msgstr "FitIns" -#: flwr.server.app.start_server:33 of -msgid "server certificate." -msgstr "服务器证书。" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#, fuzzy +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.server.app.start_server:34 of -msgid "server private key." -msgstr "服务器私人密钥。" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." 
-msgstr "**hist** -- 包含训练和评估指标的对象。" +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +#, fuzzy +msgid "FitRes" +msgstr "FitRes" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" -msgstr "启动不安全的服务器:" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" -msgstr "启动支持 SSL 的服务器:" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:2 +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 #, fuzzy -msgid "strategy" -msgstr "Krum 策略。" +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 #, fuzzy -msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" -msgstr "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." -msgstr "Bulyan 策略。" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#, fuzzy +msgid "GetParametersIns" +msgstr "参数" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 #, fuzzy -msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" -msgstr "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 #, fuzzy -msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" -msgstr "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgid "GetParametersRes" +msgstr "参数" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#, fuzzy +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 #, fuzzy -msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 #, fuzzy -msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
-msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +msgid "GetPropertiesIns" +msgstr "GetPropertiesIns" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 #, fuzzy -msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 #, fuzzy -msgid "Strategy wrapper for central DP with server-side adaptive clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +msgid "GetPropertiesRes" +msgstr "GetPropertiesRes" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 #, fuzzy -msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 #, fuzzy -msgid "Strategy wrapper for central DP with client-side fixed clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +msgid ":py:obj:`properties `\\" +msgstr ":py:obj:`properties `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:2 #, fuzzy -msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +msgid "Message" +msgstr "服务器端" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of #, fuzzy -msgid "Strategy wrapper for central DP with server-side fixed clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +msgid "A dataclass including information about the message to be executed." +msgstr "数据类型,包括要执行的信息的相关信息。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message:5 of #, fuzzy msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." -msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到该实体的记录。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message:8 of #, fuzzy msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." 
-msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." -msgstr "联邦平均策略。" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +#, fuzzy +msgid "Construct a reply message indicating an error happened." +msgstr "构建一条回复信息,说明发生了错误。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of #, fuzzy -msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +msgid "Create a reply to this message with specified content and TTL." +msgstr "以指定的内容和 TTL 创建对该信息的回复。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." -msgstr "联邦平均动量策略。" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#, fuzzy +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr ":py:obj:`has_content `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of #, fuzzy -msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +msgid "Return True if message has content, else False." +msgstr "如果信息有内容,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy -msgid "Configurable FedMedian strategy implementation." -msgstr "可配置的 FedAvg 策略实施。" +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr ":py:obj:`has_error `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of #, fuzzy -msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -msgstr "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgid "Return True if message has an error, else False." 
+msgstr "如果信息有错误,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of +#: flwr.common.Message.content:1::1 of #, fuzzy -msgid "Federated Optim strategy." -msgstr "联邦优化策略。" +msgid ":py:obj:`content `\\" +msgstr ":py:obj:`content `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of #, fuzzy -msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +msgid "The content of this message." +msgstr "评估客户端的反应。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." -msgstr "联邦优化策略。" +#: flwr.common.Message.content:1::1 of +#, fuzzy +msgid ":py:obj:`error `\\" +msgstr ":py:obj:`error `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of #, fuzzy -msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgid "Error captured by this message." +msgstr "该信息捕捉到的错误。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." -msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.Message.content:1::1 of #, fuzzy -msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +msgid ":py:obj:`metadata `\\" +msgstr ":py:obj:`metadata `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +#: flwr.common.message.Message.create_error_reply:3 of #, fuzzy -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" +msgid "The error that was encountered." +msgstr "遇到的错误。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of #, fuzzy msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"该信息的有效时间(秒)。如果未设置,则将根据收到的信息过期前的剩余时间来设置。其计算公式为:ttl = msg.meta.ttl - " +"(reply.meta.created_at - msg.meta.created_at)" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of #, fuzzy -msgid "Configurable FedXgbCyclic strategy implementation." -msgstr "可配置的 FedAvg 策略实施。" +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation:" +msgstr "该信息的有效时间(秒)。如果未设置,则将根据接收到的信息过期前的剩余时间来设置。其计算公式如下" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +#, fuzzy +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" + +#: flwr.common.message.Message.create_reply:3 of #, fuzzy msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " +"\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " +"\"reply_to_message \"设置为该消息的 ID。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" +#: flwr.common.message.Message.create_reply:7 of +#, fuzzy +msgid "The content for the reply message." +msgstr "回复信息的内容。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.message.Message.create_reply:16 of #, fuzzy -msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +msgid "A new `Message` instance representing the reply." +msgstr "代表回复的新的 `Message` 实例。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." -msgstr "FedYogi [Reddi 等人,2020] 策略。" +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +#, fuzzy +msgid "MessageType" +msgstr "返回类型" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 #, fuzzy -msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +msgid ":py:obj:`EVALUATE `\\" +msgstr ":py:obj:`EVALUATE `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." -msgstr "可配置的容错 FedAvg 策略实施。" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#, fuzzy +msgid ":py:obj:`QUERY `\\" +msgstr ":py:obj:`QUERY `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 #, fuzzy -msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -msgstr "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgid ":py:obj:`TRAIN `\\" +msgstr ":py:obj:`TRAIN `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 #, fuzzy -msgid "Krum [Blanchard et al., 2017] strategy." 
-msgstr "FedYogi [Reddi 等人,2020] 策略。" +msgid "MessageTypeLegacy" +msgstr "MessageTypeLegacy" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 #, fuzzy -msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" -msgstr "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr ":py:obj:`GET_PARAMETERS `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." -msgstr "可配置的 QFedAvg 策略实施。" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#, fuzzy +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr ":py:obj:`GET_PROPERTIES `\\" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of #, fuzzy -msgid ":py:obj:`Strategy `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "An identifier for the current run." +msgstr "当前运行的标识符。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." -msgstr "服务器策略实现的抽象基类。" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +#, fuzzy +msgid "An identifier for the current message." +msgstr "当前信息的标识符。" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of #, fuzzy -msgid "Bulyan" -msgstr "Bulyan" +msgid "An identifier for the node sending this message." +msgstr "发送此信息的节点的标识符。" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" -msgstr "server.strategy.DPFedAvgFixed" +msgid "An identifier for the node receiving this message." +msgstr "接收此信息的节点的标识符。" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." -msgstr "实施基于 https://arxiv.org/abs/1802.07927。" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +#, fuzzy +msgid "An identifier for the message this message replies to." +msgstr "该信息回复的信息的标识符。" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. 
Defaults to 1.0." -msgstr "训练期间使用客户的比例。默认为 1.0。" +#: flwr.common.message.Metadata:13 of +#, fuzzy +msgid "" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." +msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." -msgstr "验证过程中使用的客户端比例。默认为 1.0。" +#: flwr.common.message.Metadata:16 of +#, fuzzy +msgid "Time-to-live for this message in seconds." +msgstr "该信息的有效时间。" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." -msgstr "训练期间使用的最少客户数。默认为 2。" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +#, fuzzy +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "编码接收端要执行的操作的字符串。" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." -msgstr "验证过程中使用的最少客户端数量。默认为 2。" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`created_at `\\" +msgstr ":py:obj:`ttl `\\" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." -msgstr "系统中客户总数的最小值。默认为 2。" +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid "Unix timestamp when the message was created." +msgstr "创建信息时的 Unix 时间戳。" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." 
-msgstr "系统中恶意客户端的数量。默认为 0。" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`dst_node_id `\\" +msgstr ":py:obj:`dst_node_id `\\" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." -msgstr "用于验证的可选函数。默认为 \"无\"。" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`group_id `\\" +msgstr ":py:obj:`group_id `\\" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." -msgstr "用于配置训练的功能。默认为 \"无\"。" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +#, fuzzy +msgid "An identifier for grouping messages." +msgstr "用于分组信息的标识符。" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." -msgstr "用于配置验证的函数。默认为 \"无\"。" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`message_id `\\" +msgstr ":py:obj:`message_id `\\" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." 
-msgstr "是否接受包含失败的轮。默认为 True。" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`message_type `\\" +msgstr ":py:obj:`message_type `\\" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." -msgstr "初始全局模型参数。" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`reply_to_message `\\" +msgstr ":py:obj:`reply_to_message `\\" -#: flwr.server.strategy.bulyan.Bulyan:27 of -msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" -msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`run_id `\\" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" -msgstr "第一聚类规则的参数" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`src_node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +msgid ":py:obj:`ttl `\\" +msgstr ":py:obj:`ttl `\\" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." -msgstr "采用加权平均法计算评估损失总额。" +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: of +#, fuzzy +msgid "Time-to-live for this message." +msgstr "该信息的有效时间。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Metrics.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Metrics" +msgstr "MetricsRecord" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." 
-msgstr "使用 Bulyan 技术汇总拟合结果。" +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +#, fuzzy +msgid "MetricsRecord" +msgstr "MetricsRecord" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." -msgstr "配置下一轮评估。" +#: flwr.common.record.metricsrecord.MetricsRecord:3 of +msgid "" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." 
msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." -msgstr "配置下一轮训练。" +#: flwr.common.record.metricsrecord.MetricsRecord:12 of +msgid "" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." 
msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." -msgstr "使用评估函数评估模型参数。" +#: flwr.common.record.metricsrecord.MetricsRecord:28 of +msgid "" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:39 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" - -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." -msgstr "初始化全局模型参数。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." 
-msgstr "使用部分可用客户进行评估。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." -msgstr "返回样本大小和所需的可用客户数量。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" -msgstr "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "This class is deprecated and will be removed in a future release." 
-msgstr "该类已被弃用,将在以后的版本中删除。" +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." -msgstr "使用给定的策略汇总评估损失。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." -msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.NDArray.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "NDArray" +msgstr "NDArray" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." 
-msgstr "使用指定策略配置下一轮评估。" +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +#, fuzzy +msgid "NDArrays" +msgstr "NDArray" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#, fuzzy +msgid ":py:obj:`tensors `\\" +msgstr ":py:obj:`tensors `\\" + +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#, fuzzy +msgid ":py:obj:`tensor_type `\\" +msgstr ":py:obj:`tensor_type `\\" + +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "参数" + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] 的形式保存条目。ParametersRecord " +"对象相当于 PyTorch 的 state_dict,但它保存的是序列化的张量。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." -msgstr "使用策略中的评估函数评估模型参数。" +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." 
+msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." -msgstr "使用给定的策略初始化全局模型参数。" +#: flwr.common.record.parametersrecord.ParametersRecord:23 of +msgid "" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." +msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." -msgstr "本轮联邦学习。" +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +#, fuzzy +msgid "Let's see some examples:" +msgstr "让我们来看几个例子:" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." 
-msgstr "客户端管理器,用于管理当前连接的所有客户端。" +#: flwr.common.record.parametersrecord.ParametersRecord:50 of +msgid "" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" +msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -"**evaluate_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" -" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" +#: flwr.common.record.parametersrecord.ParametersRecord:83 of +msgid "" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." 
+msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" -msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." 
-msgstr "使用非加权汇总法汇总训练结果。" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#, fuzzy msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." -msgstr "配置包含差分隐私 (DP) 的下一轮训练。" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy 数组)的元数据相对应,需要进行去eralization。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Properties.rst:2 +#, fuzzy +msgid "Properties" +msgstr "GetPropertiesRes" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#, fuzzy +msgid "ReconnectIns" +msgstr "启用 SSL 连接" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +#, fuzzy +msgid ":py:obj:`seconds `\\" +msgstr ":py:obj:`seconds `\\" + +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 #, fuzzy +msgid "RecordSet" +msgstr "RecordSet" + +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." 
msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." -msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." +msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -"**fit_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -#, fuzzy -msgid "DifferentialPrivacyClientSideAdaptiveClipping" -msgstr "DifferentialPrivacyClientSideAdaptiveClipping" +#: flwr.common.record.recordset.RecordSet:24 of +msgid "" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." +msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of +#: flwr.common.record.recordset.RecordSet:29 of #, fuzzy -msgid "Use `adaptiveclipping_mod` modifier at the client side." -msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" +msgid "Let's see an example." +msgstr "让我们来看几个例子:" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." 
msgstr "" -"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " -"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切,通常使用内置的 " -"`adaptiveclipping_mod`。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -#, fuzzy -msgid "The strategy to which DP functionalities will be added by this wrapper." -msgstr "该包装器将添加 DP 功能的策略。" +#: flwr.common.record.recordset.RecordSet:66 of +msgid "" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." +msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "The noise multiplier for the Gaussian mechanism for model updates." -msgstr "用于模型更新的高斯机制的噪声乘数。" +msgid ":py:obj:`configs_records `\\" +msgstr ":py:obj:`configs_records `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "The number of clients that are sampled on each round." -msgstr "每轮取样的客户数。" +msgid "Dictionary holding ConfigsRecord instances." +msgstr "包含 ConfigsRecord 实例的字典。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." -msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置为 0.1。" +msgid ":py:obj:`metrics_records `\\" +msgstr ":py:obj:`metrics_records `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of #, fuzzy -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." -msgstr "需要剪切的更新量化值。默认为 0.5。" +msgid "Dictionary holding MetricsRecord instances." +msgstr "保存 MetricsRecord 实例的字典。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." 
-msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" +msgid ":py:obj:`parameters_records `\\" +msgstr ":py:obj:`parameters_records `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of #, fuzzy -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. recommends to set to `expected_num_records/20`" -msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 \"expected_num_records/20" +msgid "Dictionary holding ParametersRecord instances." +msgstr "存放 ParametersRecord 实例的字典。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 #, fuzzy -msgid "Create a strategy:" -msgstr "server.strategy" +msgid "ServerMessage" +msgstr "服务器端" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy -msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" -msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" +msgid ":py:obj:`evaluate_ins `\\" +msgstr ":py:obj:`evaluate_ins `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" -msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中:" +msgid ":py:obj:`fit_ins `\\" +msgstr ":py:obj:`fit_ins `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`get_parameters_ins " +"`\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`get_properties_ins " +"`\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of +#: ../../source/ref-api/flwr.common.Status.rst:2 #, fuzzy -msgid "Aggregate training results and update clip norms." 
-msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" +msgid "Status" +msgstr "客户端状态。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Status.rst:29::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Status.rst:29::1 #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`message `\\" +msgstr ":py:obj:`message `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid "array\\_from\\_numpy" +msgstr "array\\_from\\_numpy" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid "bytes\\_to\\_ndarray" +msgstr "bytes\\_to\\_ndarray" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#: ../../source/ref-api/flwr.common.configure.rst:2 #, fuzzy -msgid "DifferentialPrivacyClientSideFixedClipping" -msgstr "差分隐私" +msgid "configure" +msgstr "配置日志记录" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of +#: ../../source/ref-api/flwr.common.event.rst:2 #, fuzzy -msgid "Use `fixedclipping_mod` modifier at the client side." -msgstr "在客户端使用 `fixedclipping_mod` 修改器。" +msgid "event" +msgstr "事件" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: ../../source/ref-api/flwr.common.log.rst:2 #, fuzzy +msgid "log" +msgstr "登录" + +#: logging.Logger.log:3 of msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." -msgstr "" -"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping " -"\"相比,\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切,通常是使用内置的 " -"\"fixedclipping_mod\"。" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." 
+msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 #, fuzzy -msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." -msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" +msgid "ndarray\\_to\\_bytes" +msgstr "ndarray\\_to\\_bytes" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 #, fuzzy -msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" -msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" +msgid "ndarrays\\_to\\_parameters" +msgstr "ndarrays\\_to\\_parameters" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of +#: ../../source/ref-api/flwr.common.now.rst:2 #, fuzzy -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" -msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" +msgid "now" +msgstr "现在" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +#, fuzzy +msgid "parameters\\_to\\_ndarrays" +msgstr "parameters\\_to\\_ndarrays" + +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "服务器" + +#: ../../source/ref-api/flwr.server.rst:22::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "使用 gRPC 传输层启动 Flower 服务器。" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid ":py:obj:`ClientManager `\\ \\(\\)" +msgstr ":py:obj:`ClientManager `\\ \\(\\)" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +#, fuzzy +msgid "Abstract base class for managing Flower clients." +msgstr "Flower 客户端的抽象基类。" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +#, fuzzy +msgid "Abstract base Driver class for the Driver API." +msgstr "Flower 客户端的抽象基类。" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid ":py:obj:`History `\\ \\(\\)" +msgstr ":py:obj:`History `\\ \\(\\)" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +#, fuzzy +msgid "History class for training and/or evaluation metrics collection." 
+msgstr "**hist** -- 包含训练和评估指标的对象。" + +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of #, fuzzy -msgid "Add noise to the aggregated parameters." -msgstr "然后将汇总结果序列化:" +msgid "Legacy Context." +msgstr "传承背景。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." +msgstr "Flower 服务器。" + +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of #, fuzzy -msgid "DifferentialPrivacyServerSideAdaptiveClipping" -msgstr "DifferentialPrivacyServerSideAdaptiveClipping" +msgid "Flower server config." 
+msgstr "Flower 服务器。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" -msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 \"expected_num_records/20" +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgstr ":py:obj:`SimpleClientManager `\\ \\(\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of #, fuzzy -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" -msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" +msgid "Provides a pool of available clients." +msgstr "使用部分可用客户进行评估。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "包含策略抽象和不同的实现方法。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "Workflows." 
+msgstr "工作流程" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid "ClientManager" +msgstr "客户端" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid ":py:obj:`all `\\ \\(\\)" +msgstr ":py:obj:`all `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy -msgid "DifferentialPrivacyServerSideFixedClipping" -msgstr "差分隐私" +msgid "Return all available clients." +msgstr "返回所有可用客户。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" -msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" +msgid ":py:obj:`num_available `\\ \\(\\)" +msgstr ":py:obj:`num_available `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +msgid "Return the number of available clients." +msgstr "返回样本大小和所需的可用客户数量。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`register `\\ \\(client\\)" +msgstr ":py:obj:`register `\\ \\(client\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of #, fuzzy -msgid "Compute the updates, clip, and pass them for aggregation." -msgstr "计算更新、剪辑并将其传递给聚合。" +msgid "Register Flower ClientProxy instance." 
+msgstr "注册 Flower ClientProxy 实例。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "Sample a number of Flower ClientProxy instances." +msgstr "取样若干 Flower ClientProxy 实例。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid ":py:obj:`unregister `\\ \\(client\\)" +msgstr ":py:obj:`unregister `\\ \\(client\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +#, fuzzy +msgid "Unregister Flower ClientProxy instance." +msgstr "取消注册 Flower ClientProxy 实例。" + +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of #, fuzzy -msgid "Afterward, add noise to the aggregated parameters." -msgstr "然后,在汇总参数中添加噪声。" +msgid "Wait until at least `num_clients` are available." +msgstr "等待至少 `num_clients` 可用。" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of #, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +msgid "**num_available** -- The number of currently available clients." 
+msgstr "**num_available** -- 当前可用客户端的数量。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." +msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 False。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "This method is idempotent." +msgstr "这种方法是幂等的。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." -msgstr "使用加权平均法汇总拟合结果。" +#: ../../source/ref-api/flwr.server.Driver.rst:2 +#, fuzzy +msgid "Driver" +msgstr "服务器" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Create a new message with specified parameters." 
+msgstr "使用指定参数创建新信息。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr ":py:obj:`get_node_ids `\\ \\(\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Get node IDs." +msgstr "获取节点 ID。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +#, fuzzy +msgid "Pull messages based on message IDs." +msgstr "根据信息 ID 提取信息。" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" - -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 -msgid "FedAdagrad" -msgstr "FedAdagrad" +":py:obj:`push_messages `\\ " +"\\(messages\\)" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" -msgstr "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" -msgstr "实施基于 https://arxiv.org/abs/2003.00295v5" +msgid "Push messages to specified node IDs." +msgstr "向指定的节点 ID 推送信息。" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." 
-msgstr "指标汇总功能,可选。" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." -msgstr "服务器端学习率。默认为 1e-1。" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +#, fuzzy +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "向指定的节点 ID 推送信息并提取回复信息。" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." -msgstr "客户端学习率。默认为 1e-1。" +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid ":py:obj:`run `\\" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." -msgstr "控制算法的适应度。默认为 1e-9。" +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid "Run information." +msgstr "运行模拟" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:3 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id \"将自动设置。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:6 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "新信息的内容。其中包含要发送到目的节点的记录。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:9 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "信息类型,定义接收端要执行的操作。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:12 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "The ID of the destination node to which the message is being sent." 
+msgstr "信息发送目的地节点的 ID。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:14 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:17 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." +msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回复被视为有效的持续时间。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:23 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:3 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." +msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +#: flwr.server.driver.driver.Driver.pull_messages:6 of #, fuzzy -msgid "FedAdam" -msgstr "FedAdagrad" - -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." -msgstr "动量参数。默认为 0.9。" - -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." -msgstr "第二动量参数。默认为 0.99。" +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "要检索回复信息的信息 ID 的可迭代项。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:9 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +msgid "**messages** -- An iterable of messages received." +msgstr "**messages** -- 收到的信息迭代。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:3 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." 
+msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +msgid "An iterable of messages to be sent." +msgstr "要发送的信息迭代。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:9 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到所有回复或超过指定的超时时间。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:9 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\",则没有时间限制,该方法将等待直到收到所有信息的回复。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:14 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:19 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." 
msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设置了 " +"`timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直有效,不受 `timeout` 影响。" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#: ../../source/ref-api/flwr.server.History.rst:2 #, fuzzy -msgid "FedAvg" -msgstr "DP-FedAvg" - -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" -msgstr "实施基于 https://arxiv.org/abs/1602.05629" - -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." -msgstr "" -"训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " -"available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" +msgid "History" +msgstr "历史" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " -"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" -#: flwr.server.strategy.fedavg.FedAvg:33 of +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy -msgid "Enable (True) or disable (False) in-place aggregation of model updates." -msgstr "启用(真)或禁用(假)模型更新的就地聚合。" +msgid "Add one loss entry (from centralized evaluation)." +msgstr "集中评估" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Add one loss entry (from distributed evaluation)." 
+msgstr "增加一个损失条目(来自分布式评估)。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Add metrics entries (from centralized evaluation)." +msgstr "集中评估" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Add metrics entries (from distributed evaluation)." +msgstr "定制的集中/分布式评估" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +msgid "Add metrics entries (from distributed fit)." 
+msgstr "添加度量条目(来自分布式拟合)。" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 #, fuzzy -msgid "FedAvgAndroid" -msgstr "DPFedAvgAdaptive" +msgid "LegacyContext" +msgstr "遗留上下文" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.compat.legacy_context.LegacyContext:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "Bases: :py:class:`~flwr.common.context.Context`" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" -msgstr "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "Deserialize NumPy array from bytes." -msgstr "从字节反序列化 NumPy ndarray。" +msgid ":py:obj:`client_manager `\\" +msgstr ":py:obj:`client_manager `\\" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`history `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`node_id `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`node_config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" 
-":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" -msgstr "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +msgid ":py:obj:`run_config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "服务器" + +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "Serialize NumPy array to bytes." -msgstr "将 NumPy ndarray 序列化为字节。" +msgid ":py:obj:`client_manager `\\ \\(\\)" +msgstr ":py:obj:`client_manager `\\ \\(\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" -msgstr "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +msgid "Return ClientManager." +msgstr "返回客户端(本身)。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Send shutdown signal to all clients." +msgstr "向所有客户端发送关闭信号。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of #, fuzzy -msgid "Convert parameters object to NumPy weights." -msgstr "将参数对象转换为 NumPy ndarrays。" +msgid "Validate current global model on a number of clients." +msgstr "当前(全局)模型参数。" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "FedAvgM" -msgstr "DP-FedAvg" +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgstr ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of #, fuzzy -msgid "Implementation based on https://arxiv.org/abs/1909.06335" -msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" - -#: flwr.server.strategy.fedavgm.FedAvgM:25 of -msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." 
-msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" - -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." -msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" +msgid "Run federated averaging for a number of rounds." +msgstr "联邦平均动量策略。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Perform a single round of federated averaging." +msgstr "本轮联邦学习。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Set the max_workers used by ThreadPoolExecutor." +msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgstr ":py:obj:`set_strategy `\\ \\(strategy\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Replace server strategy." 
+msgstr "server.strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "ServerApp" +msgstr "服务器" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_app.ServerApp:5 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "使用现有策略" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +#: flwr.server.server_app.ServerApp:17 of #, fuzzy -msgid "FedMedian" -msgstr "联邦医保" +msgid "Use the `ServerApp` with a custom main function:" +msgstr "使用带有自定义主函数的 `ServerApp`:" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_app.ServerApp.main:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Return a decorator that registers the main fn with the server app." +msgstr "返回向服务器应用程序注册 main fn 的装饰器。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." -msgstr "使用中位数汇总拟合结果。" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +#, fuzzy +msgid "ServerAppComponents" +msgstr "服务器" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:3 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:9 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. 
If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`client_manager " +"`\\" +msgstr ":py:obj:`client_manager `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "FedOpt" -msgstr "FedOpt" +msgid ":py:obj:`server `\\" +msgstr ":py:obj:`run_server_app `\\ \\(\\)" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." -msgstr "动量参数。默认为 0.0。" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." -msgstr "第二动量参数。默认为 0.0。" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#, fuzzy +msgid "ServerConfig" +msgstr "服务器" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_config.ServerConfig:3 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"All attributes have default values which allows users to configure just " +"the ones they care about." 
+msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`num_rounds `\\" +msgstr ":py:obj:`num_rounds `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +msgid ":py:obj:`round_timeout `\\" +msgstr ":py:obj:`round_timeout `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "SimpleClientManager" +msgstr "SimpleClientManager" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager:1 of +#, fuzzy +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgstr "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#, fuzzy +msgid ":py:obj:`all `\\ \\(\\)" +msgstr ":py:obj:`all `\\ \\(\\)" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`num_available `\\" +" \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`register `\\ " +"\\(client\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy -msgid "FedProx" -msgstr "FedProx" - -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" -msgstr "实施基于 https://arxiv.org/abs/1812.06127" - -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The 
strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" -msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:" - -#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#, fuzzy msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." -msgstr "其中,$w^t$ 是全局参数,$w$ 是优化函数的局部权重。" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." +msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" -msgstr "例如,在 PyTorch 中,损失将从:" +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +#, fuzzy +msgid "The number of clients to wait for." +msgstr "需要等待的客户数量。" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" -msgstr "致:" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +#, fuzzy +msgid "The time in seconds to wait for, defaults to 86400 (24h)." +msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" -#: flwr.server.strategy.fedprox.FedProx:30 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +#, fuzzy +msgid "**success**" +msgstr "**success**" + +#: ../../source/ref-api/flwr.server.start_server.rst:2 +#, fuzzy +msgid "start\\_server" +msgstr "server.start_server" + +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" + +#: flwr.server.app.start_server:5 of msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." -msgstr "其中,\"global_params \"是训练前的参数副本。" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.strategy.fedprox.FedProx:65 of +#: flwr.server.app.start_server:12 of msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -"优化中使用的近端项权重。0.0 使该策略等同于 " -"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." 
msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:21 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " +"服务器私钥。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:32 of +msgid "CA certificate." +msgstr "CA 证书。" + +#: flwr.server.app.start_server:33 of +msgid "server certificate." +msgstr "服务器证书。" + +#: flwr.server.app.start_server:34 of +msgid "server private key." +msgstr "服务器私人密钥。" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." 
+msgstr "**hist** -- 包含训练和评估指标的对象。" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "启动不安全的服务器:" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "启动支持 SSL 的服务器:" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "strategy" +msgstr "Krum 策略。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." +msgstr "Bulyan 策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" -msgstr "向客户发送近端因子mu" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "FedTrimmedAvg" -msgstr "server.strategy.FedTrimmedAvg" +msgid "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of #, fuzzy -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" -msgstr "实施基于 https://arxiv.org/abs/1802.07927。" - -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
-msgstr "截取分布两个尾部的分数。默认为 0.2。" +msgid "Strategy wrapper for central DP with client-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with client-side fixed clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." -msgstr "使用修剪平均值汇总拟合结果。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with server-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +#, fuzzy +msgid "Strategy wrapper for central DP with server-side fixed clipping." +msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." 
+msgstr "可配置的容错 FedAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." +msgstr "联邦平均策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "FedXgbBagging" -msgstr "FedXgbBagging" +msgid "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." 
+msgstr "联邦平均动量策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of #, fuzzy -msgid "Aggregate evaluation metrics using average." -msgstr "采用加权平均法计算评估损失总额。" +msgid "Configurable FedMedian strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy -msgid "Aggregate fit results using bagging." -msgstr "使用 Bulyan 技术汇总拟合结果。" +msgid "Federated Optim strategy." +msgstr "联邦优化策略。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." +msgstr "联邦优化策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." 
+msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" - -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of #, fuzzy -msgid "FedXgbCyclic" -msgstr "FedXgbCyclic" +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." 
+msgstr "可配置的 FedXgbNAvg 策略实施。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." +msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Krum [Blanchard et al., 2017] strategy." +msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "可配置的 QFedAvg 策略实施。" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "FedXgbNnAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -#, fuzzy -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
-msgstr "" -"该策略已被弃用,但在 Flower Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost 中有其副本。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." +msgstr "服务器策略实现的抽象基类。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Bulyan" +msgstr "Bulyan" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "server.strategy.DPFedAvgFixed" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." +msgstr "训练期间使用客户的比例。默认为 1.0。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." 
+msgstr "验证过程中使用的客户端比例。默认为 1.0。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "训练期间使用的最少客户数。默认为 2。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." +msgstr "验证过程中使用的最少客户端数量。默认为 2。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "系统中客户总数的最小值。默认为 2。" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -#, fuzzy -msgid "FedYogi" -msgstr "FedYogi" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." +msgstr "系统中恶意客户端的数量。默认为 0。" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -#, fuzzy -msgid "Server-side learning rate. Defaults to 1e-2." 
-msgstr "服务器端学习率。默认为 1e-1。" +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "用于验证的可选函数。默认为 \"无\"。" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -#, fuzzy -msgid "Client-side learning rate. Defaults to 0.0316." -msgstr "客户端学习率。默认为 1e-1。" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." +msgstr "用于配置训练的功能。默认为 \"无\"。" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -#, fuzzy -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." -msgstr "控制算法的适应度。默认为 1e-9。" +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." +msgstr "用于配置验证的函数。默认为 \"无\"。" + +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "是否接受包含失败的轮。默认为 True。" + +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." 
+msgstr "初始全局模型参数。" + +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" + +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "第一聚类规则的参数" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "采用加权平均法计算评估损失总额。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit `\\ " "\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
+msgstr "使用 Bulyan 技术汇总拟合结果。" + #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." 
+msgstr "配置下一轮评估。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit `\\ " "\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
+msgstr "配置下一轮训练。" + #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "使用评估函数评估模型参数。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." +msgstr "初始化全局模型参数。" #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" msgstr "" ":py:obj:`num_evaluation_clients " -"`\\ " +"`\\ " "\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." 
+msgstr "使用部分可用客户进行评估。" + #: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -#, fuzzy -msgid "Krum" -msgstr "Krum" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." +msgstr "返回样本大小和所需的可用客户数量。" -#: flwr.server.strategy.krum.Krum:3 of +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of #, fuzzy -msgid "Implementation based on https://arxiv.org/abs/1703.02757" -msgstr "实施基于 https://arxiv.org/abs/2304.07537。" +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." -msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +#, fuzzy +msgid "This class is deprecated and will be removed in a future release." 
+msgstr "该类已被弃用,将在以后的版本中删除。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" ":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." +msgstr "使用给定的策略汇总评估损失。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." -msgstr "使用 Krum 汇总拟合结果。" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." 
+msgstr "使用指定策略配置下一轮评估。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." +msgstr "使用策略中的评估函数评估模型参数。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." 
+msgstr "使用给定的策略初始化全局模型参数。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." +msgstr "本轮联邦学习。" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -#, fuzzy -msgid "QFedAvg" -msgstr "DP-FedAvg" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." +msgstr "客户端管理器,用于管理当前连接的所有客户端。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"**evaluate_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" +" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#, fuzzy +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "" +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " +":py:obj:`aggregate_fit " +"`\\ " "\\(server\\_round\\, results\\, failures\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "使用非加权汇总法汇总训练结果。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" ":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`configure_fit `\\ " +":py:obj:`configure_fit " +"`\\ " "\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." 
+msgstr "配置包含差分隐私 (DP) 的下一轮训练。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`evaluate `\\ " +":py:obj:`evaluate `\\ " "\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" msgstr "" ":py:obj:`initialize_parameters " -"`\\ " +"`\\ " "\\(client\\_manager\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**fit_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 #, fuzzy -msgid "Strategy" -msgstr "Krum 策略。" +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "DifferentialPrivacyClientSideAdaptiveClipping" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +#, fuzzy +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 #: of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " +"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切,通常使用内置的 " +"`adaptiveclipping_mod`。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 #: of -msgid "Aggregate evaluation results." -msgstr "聚合评估结果。" +#, fuzzy +msgid "The strategy to which DP functionalities will be added by this wrapper." +msgstr "该包装器将添加 DP 功能的策略。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 #: of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "The noise multiplier for the Gaussian mechanism for model updates." +msgstr "用于模型更新的高斯机制的噪声乘数。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." -msgstr "汇总训练结果。" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +#, fuzzy +msgid "The number of clients that are sampled on each round." +msgstr "每轮取样的客户数。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 #: of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置为 0.1。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +#, fuzzy +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
+msgstr "需要剪切的更新量化值。默认为 0.5。" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 #: of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." +msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 #: of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" +msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 \"expected_num_records/20" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." -msgstr "评估当前的模型参数。" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +#, fuzzy +msgid "Create a strategy:" +msgstr "server.strategy" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 #: of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." -msgstr "初始化(全局)模型参数。" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +#, fuzzy +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中:" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." 
-msgstr "" -"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, " -"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." -msgstr "服务器等待客户端更新时发生的异常。" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of -msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." -msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某种加权平均值。" - -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, " -"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败" -" \"中都应该有一个 \"异常\"。" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " -"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" - -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of -msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." -msgstr "该函数可用于对模型参数进行集中(即服务器端)评估。" - -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of -msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." -msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" - -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of -msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." 
-msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:2 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of #, fuzzy -msgid "workflow" -msgstr "工作流程" +msgid "Aggregate training results and update clip norms." +msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -#, fuzzy -msgid "Default workflow in Flower." -msgstr "Flower 中的默认工作流程。" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy -msgid "The workflow for the SecAgg+ protocol." -msgstr "SecAgg+ 协议的工作流程。" +msgid "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 #, fuzzy -msgid "The workflow for the SecAgg protocol." 
-msgstr "SecAgg 协议的工作流程。" +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "差分隐私" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of #, fuzzy -msgid "DefaultWorkflow" -msgstr "工作流程" +msgid "Use `fixedclipping_mod` modifier at the client side." +msgstr "在客户端使用 `fixedclipping_mod` 修改器。" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of #, fuzzy -msgid "SecAggPlusWorkflow" -msgstr "工作流程" +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." +msgstr "" +"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping " +"\"相比,\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切,通常是使用内置的 " +"\"fixedclipping_mod\"。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 #: of #, fuzzy msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." -msgstr "" -"SecAgg+ " -"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," -" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " -"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." +msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 #: of #, fuzzy msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. 
- 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" -msgstr "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ 配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 #: of #, fuzzy -msgid "key shares." -msgstr "关键股份。" +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." -msgstr "收集屏蔽向量\": 向目标客户端转发加密密钥共享,并收集屏蔽模型参数。" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." -msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 #: of #, fuzzy -msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." -msgstr "只有聚合模型参数才会公开并传递给 `Strategy.aggregate_fit`,从而确保个人数据隐私。" +msgid "Add noise to the aggregated parameters." +msgstr "然后将汇总结果序列化:" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." 
+":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"在 SecAgg+ " -"协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模型更新的安全聚合。每个客户端向其每个邻居发送一份,同时保留一份。" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." -msgstr "重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个客户的数据。" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." -msgstr "在服务器端进行加权平均计算(如 FedAvg 算法)时,可分配给任何单个客户端更新的权重的最大值。" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." -msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, clipping_range] 范围内,便于量化。" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 #, fuzzy -msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." 
-msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] 中的整数。这有助于对模型更新进行加密操作。" +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "DifferentialPrivacyServerSideAdaptiveClipping" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 #: of #, fuzzy msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." -msgstr "" -"对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range \"必须小于 " -"4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n 的值。" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" +msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 \"expected_num_records/20" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 #: of #, fuzzy msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." -msgstr "超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 \"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." -msgstr "一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy -msgid "Too large `max_weight` may compromise the precision of the quantization." -msgstr "过大的 `max_weight` 可能会影响量化的精度。" +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." 
-msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." -msgstr "当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运行时确定。这样就可以根据参与客户端的总数进行动态调整。" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"同样,当 `reconstruction_threshold` " -"为浮点数时,它被解释为重建私钥所需的份额数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." 
+":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -"份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")在平衡 SecAgg+ " -"协议的隐私性、稳健性和效率方面发挥着关键作用。" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "差分隐私" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 #: of #, fuzzy msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" -msgstr "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy -msgid "Execute the 'collect masked vectors' stage." -msgstr "执行 \"收集屏蔽向量 \"阶段。" +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 #: of #, fuzzy -msgid "Execute the 'setup' stage." -msgstr "执行 \"设置 \"阶段。" +msgid "Compute the updates, clip, and pass them for aggregation." 
+msgstr "计算更新、剪辑并将其传递给聚合。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy -msgid "Execute the 'share keys' stage." -msgstr "执行 \"共享密钥 \"阶段。" +msgid "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 #: of #, fuzzy -msgid "Execute the 'unmask' stage." -msgstr "执行 \"解除屏蔽 \"阶段。" +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of #, fuzzy -msgid "SecAggWorkflow" -msgstr "工作流程" +msgid "Afterward, add noise to the aggregated parameters." +msgstr "然后,在汇总参数中添加噪声。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 #, fuzzy -msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" -msgstr "" -"基础: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`." +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. 
" -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"SecAgg " -"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," -" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " -"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of -#, fuzzy -msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" -msgstr "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg 配置并收集它们的公钥。- 共享密钥\": 在客户端之间广播公钥并收集加密密钥。" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." -msgstr "根据 SecAgg 协议,每个客户的私人密钥被分成 N 份,其中 N 是所选客户的数量。" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of -#, fuzzy -msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." -msgstr "一般来说,\"重建阈值 \"越高,隐私保证就越好,但对丢包的容忍度就越低。" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "使用加权平均法汇总拟合结果。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"当 `reconstruction_threshold` " -"为浮点数时,它被解释为重建私钥所需的所有选定客户端数量的比例。此功能可根据所选客户端的数量灵活设置安全阈值。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"重构阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模量范围\")在 SecAgg " -"协议中平衡隐私性、鲁棒性和效率方面起着至关重要的作用。" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`collect_masked_vectors_stage " -"`\\(driver\\," -" ...\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -":py:obj:`setup_stage " -"`\\(driver\\, context\\," -" state\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"py:obj:`share_keys_stage " -"`\\(driver\\, " -"context\\, state\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`num_fit_clients " +"`\\ " 
+"\\(num\\_available\\_clients\\)" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of #, fuzzy -msgid "simulation" -msgstr "运行模拟" +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "实施基于 https://arxiv.org/abs/2003.00295v5" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "指标汇总功能,可选。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "服务器端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "客户端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "控制算法的适应度。默认为 1e-9。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." -msgstr "启动基于 Ray 的Flower模拟服务器。" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "Run a Flower App using the Simulation Engine." 
-msgstr "使用模拟引擎运行花朵应用程序。" +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "run\\_simulation" -msgstr "运行模拟" +msgid "" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.simulation.run_simulation.run_simulation:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." -msgstr "要执行的 `ServerApp`。它将向运行在不同(虚拟)超级节点上的不同 `ClientApp`实例发送消息。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: flwr.simulation.run_simulation.run_simulation:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." -msgstr "由每个超级节点执行的 `ClientApp`。它将接收由 `ServerApp` 发送的信息。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." -msgstr "运行 ClientApp 的节点数。它们可被 ServerApp 中的驱动程序采样,并接收描述 ClientApp 应执行的操作的信息。" - -#: flwr.simulation.run_simulation.run_simulation:13 of -#, fuzzy -msgid "A simulation backend that runs `ClientApp`s." -msgstr "运行 \"客户端应用程序 \"的模拟后台。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"字典,例如 {\"\": , \"\": } 来配置后端。 中支持的值是 " -"`flwr.common.typing.ConfigsRecordValues`中包含的值。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy -msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." 
-msgstr "" -"布尔值,用于指示是否在主线程上启用 GPU 增长。如果您在 \"ServerApp \"上使用 TensorFlow 模型,同时让 " -"\"ClientApp \"在同一 GPU 上运行,则最好启用此选项。如果不启用此功能,您可能会遇到内存不足的错误,因为 TensorFlow " -"默认会分配所有 GPU 内存。有关 `tf.config.experimental.set_memory_growth()` " -"如何工作的更多信息,请参阅 TensorFlow 文档:https://www.tensorflow.org/api/stable。" +msgid "FedAdam" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "动量参数。默认为 0.9。" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." +msgstr "第二动量参数。默认为 0.99。" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." -msgstr "启用后,将只显示 INFO、WARNING 和 ERROR 日志信息。启用后,将显示 DEBUG 级日志。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "start\\_simulation" -msgstr "start_simulation" - -#: flwr.simulation.app.start_simulation:3 of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个 Client " -"类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" -" `client_fn` 或任何客户端方法(例如,在 `evaluate` 方法中加载评估数据)时(重新)创建。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." -msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"列出每个客户的 `client_id`。只有在未设置 `num_clients` " -"时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU 资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解" -" `num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray 文档。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: flwr.simulation.app.start_simulation:25 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." -msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: flwr.simulation.app.start_simulation:31 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: flwr.simulation.app.start_simulation:35 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_simulation` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#, fuzzy +msgid "FedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "实施基于 https://arxiv.org/abs/1602.05629" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +"Fraction of clients used during training. 
In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" -" { \"ignore_reinit_error\": True, \"include_dashboard\": False } " -"可以使用空字典(ray_init_args={})来防止向 ray.init 传递任何参数。" +"训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " +"available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" +"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " +"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: flwr.server.strategy.fedavg.FedAvg:33 of +#, fuzzy +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "启用(真)或禁用(假)模型更新的就地聚合。" -#: flwr.simulation.app.start_simulation:45 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." -msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: flwr.simulation.app.start_simulation:48 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: flwr.simulation.app.start_simulation:50 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." -msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." 
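The FedAvg strings above state that `min_fit_clients` (and likewise `min_evaluate_clients`) acts as a floor even when `fraction_fit * available_clients` would be smaller. A small sketch of that sampling floor (names are illustrative, not Flower's exact code):

```python
# Sketch of the sampling floor described above: request
# fraction_fit * available_clients clients, but never fewer than
# min_fit_clients. Names are illustrative, not Flower's exact code.
def num_fit_clients(num_available_clients: int,
                    fraction_fit: float = 1.0,
                    min_fit_clients: int = 2) -> int:
    sampled = int(num_available_clients * fraction_fit)
    return max(sampled, min_fit_clients)

# 0.1 * 20 = 2 would fall below the floor, so 5 clients are still sampled:
assert num_fit_clients(20, fraction_fit=0.1, min_fit_clients=5) == 5
```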
-msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.simulation.app.start_simulation:57 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 VCE " -"选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " -"NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " -"文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" - -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." -msgstr "**hist** -- 包含训练指标的对象。" - -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "更新日志" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:3 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "v1.9.0 (2024-06-10)" -msgstr "v1.3.0 (2023-02-06)" - -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" -msgstr "感谢我们的贡献者" +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:9 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" -msgstr "有什么新内容?" +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#, fuzzy +msgid "FedAvgAndroid" +msgstr "DPFedAvgAdaptive" -#: ../../source/ref-changelog.md:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" -#: ../../source/ref-changelog.md:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of #, fuzzy -msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" -msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +msgid "Deserialize NumPy array from bytes." +msgstr "从字节反序列化 NumPy ndarray。" -#: ../../source/ref-changelog.md:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." 
+":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" -#: ../../source/ref-changelog.md:31 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of #, fuzzy -msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +msgid "Serialize NumPy array to bytes." +msgstr "将 NumPy ndarray 序列化为字节。" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" -#: ../../source/ref-changelog.md:35 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:39 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" -#: ../../source/ref-changelog.md:41 -msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." -msgstr "" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +#, fuzzy +msgid "Convert parameters object to NumPy weights." +msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#, fuzzy +msgid "FedAvgM" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:43 +#: flwr.server.strategy.fedavgm.FedAvgM:3 of #, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" + +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" -msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:47 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:49 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:51 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:55 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:57 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +#, fuzzy +msgid "FedMedian" +msgstr "联邦医保" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:65 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
+msgstr "使用中位数汇总拟合结果。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"`TensorFlow快速入门 (教程) `_" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:67 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:69 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:71 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "As always, Flower code examples have received many updates." 
-msgstr "许多 \"Flower \"代码示例得到了大幅更新。" +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:73 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:75 ../../source/ref-changelog.md:1058 -msgid "Deprecations" -msgstr "停用" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:77 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 #, fuzzy -msgid "**Deprecate Python 3.8 support**" -msgstr "** 过时的 Python 3.7**" +msgid "FedOpt" +msgstr "FedOpt" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "动量参数。默认为 0.0。" + +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." +msgstr "第二动量参数。默认为 0.0。" -#: ../../source/ref-changelog.md:79 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." 
-msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:81 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:83 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:85 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:87 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" -msgstr "不兼容的更改" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:91 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:93 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:95 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 #, fuzzy +msgid "FedProx" +msgstr "FedProx" + +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "实施基于 https://arxiv.org/abs/1812.06127" + +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" -msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" +msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:" -#: ../../source/ref-changelog.md:97 +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." 
+"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" -#: ../../source/ref-changelog.md:99 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" -msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." +msgstr "其中,$w^t$ 是全局参数,$w$ 是优化函数的局部权重。" -#: ../../source/ref-changelog.md:101 +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "例如,在 PyTorch 中,损失将从:" + +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "致:" + +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +"With `global_params` being a copy of the parameters before the training " +"takes place." +msgstr "其中,\"global_params \"是训练前的参数副本。" + +#: flwr.server.strategy.fedprox.FedProx:65 of +msgid "" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" +"优化中使用的近端项权重。0.0 使该策略等同于 " +"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" -#: ../../source/ref-changelog.md:103 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:113 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**介绍 Flower Next 高级应用程序接口(稳定版)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:115 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The 
Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Flower Next 高级应用程序接口已经稳定!Flower Next 是 Flower 的未来 - 所有新功能(如 Flower " -"Mods)都将构建在它之上。您可以使用 `ServerApp` 和 `ClientApp` 开始将现有项目迁移到 Flower Next(请查看 " -"`quickstart-pytorch` 或 `quickstart-tensorflow` ,详细的迁移指南将在不久后发布)。Flower " -"Next 允许您同时运行多个项目(我们称之为多重运行),并在模拟环境或部署环境中执行同一项目,而无需更改任何代码。最棒的是什么?它与使用 " -"`Strategy`、`NumPyClient` 等的现有 Flower 项目完全兼容。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:117 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:119 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"除了使用 \"Strategy\"、\"NumPyClient \"等的 Flower Next 高级应用程序接口外,Flower 1.8 " -"还提供了新的 Flower Next " -"低级应用程序接口的预览版。低级应用程序接口允许通过向/从客户端节点发送/接收单个消息,对学习过程的各个方面进行细粒度控制。新的 " -"\"ServerApp \"支持注册一个自定义的 \"main \"函数,允许为异步FL、循环训练或联合分析等方法编写自定义训练循环。新的 " -"\"ClientApp \"支持注册 \"训练\"、\"评估 \"和 \"查询 \"函数,这些函数可以访问从 \"ServerApp " -"\"接收到的原始信息。新的抽象(如 \"RecordSet\"、\"Message \"和 " -"\"Context\")进一步支持发送多个模型、多套配置值和指标、客户端节点上的有状态计算以及自定义 SMPC 协议的实现等。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:121 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:123 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Flower Modifiers(我们称之为 " -"Mods)可以拦截信息,并直接对其进行分析、编辑或处理。修改器可用于开发可在不同项目中使用的可插拔模块。Flower 1.8 " -"已经包含了记录信息大小、通过网络发送的参数数量、固定剪切和自适应剪切的差分隐私、本地差分隐私以及安全聚合协议 SecAgg 和 SecAgg+ 的" -" Mods。Flower Mods API 作为预览版发布,但研究人员已经可以用它来试验任意的 SMPC 协议。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "向客户发送近端因子mu" -#: ../../source/ref-changelog.md:125 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 #, fuzzy -msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" -msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +msgid "FedTrimmedAvg" +msgstr "server.strategy.FedTrimmedAvg" -#: ../../source/ref-changelog.md:127 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of #, fuzzy -msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." -msgstr "" -"我们将介绍 LLM FlowerTune,这是一个介绍性示例,演示了在 Alpaca-GPT4 数据集上对预先训练好的 Llama2 模型进行联合" -" LLM 微调。该示例可轻松调整以使用不同的模型和/或数据集。请阅读我们的博文 [LLM FlowerTune: Federated LLM " -"Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-" -"flowertune-federated-llm-finetuning-with-flower/) 了解更多详情。" +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" -#: ../../source/ref-changelog.md:129 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
+msgstr "截取分布两个尾部的分数。默认为 0.2。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:131 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"内置差分保密功能!Flower 支持中央和本地差分保密 (DP)。中央差分隐私可配置为固定或自适应剪切。剪切可以发生在服务器端或客户端。本地 DP" -" 在客户端进行剪切和噪声处理。新的文档页面[解释差分隐私方法](https://flower.ai/docs/framework" -"/explanation-differential-privacy.html) " -"和新的操作指南[如何使用新的差分隐私组件](https://flower.ai/docs/framework/how-to-use-" -"differential-privacy.html) 介绍了 Flower 的使用方法。" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:133 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." +msgstr "使用修剪平均值汇总拟合结果。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:135 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Built-in Secure Aggregation is here! 
Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"内置安全聚合功能!Flower 现在支持不同的安全聚合协议。最棒的是什么?只需几行代码,您就可以将安全聚合添加到 Flower " -"项目中。在这个初始版本中,我们包含了对 SecAgg 和 SecAgg+ " -"的支持,但更多协议将很快实现。我们还将添加详细的文档,解释安全聚合以及如何在 Flower 中使用它。您可以查看新的代码示例,了解如何使用 " -"Flower 在同一项目中轻松结合联合学习、差分隐私和安全聚合。" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:137 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:139 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
-msgstr "新的 `flwr` CLI 命令允许创建新的 Flower 项目(`flwr new`),然后使用仿真引擎运行它们(`flwr run`)。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:141 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:143 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Flower 模拟引擎现在可以运行 Flower Next 项目。对于笔记本环境,还有一个新的 `run_simulation` 函数,可以运行 " -"`ServerApp` 和 `ClientApp`。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 #, fuzzy -msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +msgid "FedXgbBagging" +msgstr "FedXgbBagging" -#: ../../source/ref-changelog.md:147 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"如果出现连接错误,超级节点现在会尝试无限期地重新连接超级链接。现在可以向 `flower-client-app` 命令传递参数 `-ax-" -"retries` 和 `-max-wait-time`。最大重试次数 \"将定义客户端在放弃重新连接超级链接之前的重试次数,而 \"最大等待时间 " -"\"则定义超级节点放弃重新连接超级链接之前的等待时间。" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:149 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation metrics using average." +msgstr "采用加权平均法计算评估损失总额。" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:151 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using bagging." +msgstr "使用 Bulyan 技术汇总拟合结果。" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"有一条新的 [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"基准线。其他几条基准线也已更新。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:153 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**改进文件和翻译** ([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:155 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"像往常一样,我们合并了许多对文档的较大和较小的改进。特别要感谢 [Sebastian van der " -"Voort](https://github.com/svdvoort),他为我们带来了一份重要的文档 PR!" 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:157 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:159 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"两个新示例展示了视觉转换器(ViT)的联合训练,以及使用流行的 MONAI 库在医疗环境中进行的联合学习。quickstart-pytorch " -"\"和 \"quickstart-tensorflow \"展示了新的 Flower Next \"ServerApp \"和 " -"\"ClientApp\"。许多其他示例也得到了大量更新。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:161 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " 
-"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**一般改进**([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " 
-"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" - -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" -msgstr "无" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:167 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 #, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" +msgid "FedXgbCyclic" +msgstr "FedXgbCyclic" -#: ../../source/ref-changelog.md:173 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:177 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" -#: ../../source/ref-changelog.md:179 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"客户端 \"和 \"NumPyClient \"的子类现在可以存储保留在客户端上的本地状态。让我们先从亮点开始:这一新功能与模拟客户端(通过 " -"`start_simulation`)和网络客户端(通过 `start_client`)兼容。这也是 `Context` 和 " -"`RecordSet` 等新抽象的首次预览。客户端可以通过 `state.RecordSet` 访问 `RecordSet` 类型的状态: " -"RecordSet = self.context.state`。对该 `RecordSet` " -"的更改会在不同轮执行中保留,以便在模拟和部署中以统一的方式进行有状态计算。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:181 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" - -#: ../../source/ref-changelog.md:183 -#, fuzzy -msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." 
+":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Flower 的速度比以往更快。所有源于 `FedAvg` 的策略现在都使用就地聚合,以减少内存消耗。Flower " -"客户端序列化/解序列化已从头开始重写,从而显著提高了速度,尤其是在客户端训练时间较短的情况下。" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:185 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:187 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"通过新的 `quickstart-mlx` 代码示例,Flower 正式支持使用 [Apple MLX](https://ml-" -"explore.github.io/mlx)的联合学习。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:189 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:191 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"名为 `FedXgbCyclic` 的新策略支持逐个客户端的训练风格(通常称为循环)。xgboost-comprehensive " -"\"代码示例展示了如何在一个完整的项目中使用它。除此之外,`xgboost-comprehensive` 现在还支持模拟模式。由此,Flower " -"提供了同类最佳的 XGBoost 支持。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:193 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 #, fuzzy -msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:195 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of #, fuzzy msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." -msgstr "框架测试现在可在 Python 3.8、3.9、3.10 和 3.11 上运行。这将确保为使用最新 Python 版本的用户提供更好的支持。" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
+msgstr "" +"该策略已被弃用,但在 Flower Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost 中有其副本。" -#: ../../source/ref-changelog.md:197 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:199 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." -msgstr "为提高安全性和性能,\"grpcio \"和 \"protobuf \"依赖项已更新至最新版本。" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" -#: ../../source/ref-changelog.md:201 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:203 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"现在可以使用官方 Docker 映像运行 Flower 服务器了。新的操作指南介绍了 [如何使用 Docker 运行 " -"Flower](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html)。Flower 客户端 Docker 官方镜像将随后发布。" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:205 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:207 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:209 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:211 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:213 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 #, fuzzy -msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." 
-msgstr "更新了多个代码示例,以使用 [Flower Datasets](https://flower.ai/docs/datasets/) 。" +msgid "FedYogi" +msgstr "FedYogi" -#: ../../source/ref-changelog.md:215 +#: flwr.server.strategy.fedyogi.FedYogi:32 of #, fuzzy -msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" -msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +msgid "Server-side learning rate. Defaults to 1e-2." +msgstr "服务器端学习率。默认为 1e-1。" -#: ../../source/ref-changelog.md:217 +#: flwr.server.strategy.fedyogi.FedYogi:34 of #, fuzzy -msgid "Many Flower code examples received substantial updates." -msgstr "许多 \"Flower \"代码示例得到了大幅更新。" +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "客户端学习率。默认为 1e-1。" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 -msgid "**Update Flower Baselines**" -msgstr "**更新 Flower Baselines**" +#: flwr.server.strategy.fedyogi.FedYogi:40 of +#, fuzzy +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+msgstr "控制算法的适应度。默认为 1e-9。" -#: ../../source/ref-changelog.md:221 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" -msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:222 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:223 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:224 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:225 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:226 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:228 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " 
-"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:230 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**改进测试和开发基础设施** ([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " 
-"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +#, fuzzy +msgid "Krum" +msgstr "Krum" -#: ../../source/ref-changelog.md:232 +#: flwr.server.strategy.krum.Krum:3 of #, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "实施基于 https://arxiv.org/abs/2304.07537。" + +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." -msgstr "Flower 测试和开发基础架构已得到大幅更新。这使得 Flower 1.7 成为有史以来经过最多测试的版本。" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." +msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" -#: ../../source/ref-changelog.md:234 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:236 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " 
-"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**一般改进** ([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " 
-"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "使用 Krum 汇总拟合结果。" -#: ../../source/ref-changelog.md:240 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:242 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"到目前为止,\"NumPyClient \"类型的客户端需要通过 \"start_numpy_client \"启动。为了整合框架 " -"API,我们引入了一些变化,现在所有客户端类型都应通过 `start_client` 启动。要继续使用 `NumPyClient` " -"客户端,只需首先调用 `.to_client()` 方法,然后将返回的 `Client` 对象传递给 " -"`start_client`。示例和文档已相应更新。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:244 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:246 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." 
-msgstr "传统的 DP 封装类已废弃,但仍可正常使用。这是为 Flower 中的全新可插拔差分隐私支持版本做准备。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:248 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:250 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:252 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 #, fuzzy -msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" -msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +msgid "QFedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"从 `Task` 消息中删除了试验性字段 `sa`、 `legacy_server_message` 和 " -"`legacy_client_message`。删除的字段已被新的 `RecordSet` 抽象所取代。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:256 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:258 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"MXNet fremework 的开发工作已经结束,该项目现已[归档于 " -"GitHub](https://github.com/apache/mxnet)。现有的 MXNet 示例不会收到更新。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:260 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:266 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:270 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"** 增加对 Python 3.12 的实验支持** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:272 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"我们添加了一个新的 \"xgboost-quickstart \"示例和一个新的 \"xgboost-comprehensive " -"\"示例,后者更加深入。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:276 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 #, fuzzy -msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +msgid "Strategy" +msgstr "Krum 策略。" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"我们收到了许多关于使用 Flower 进行垂直联合学习的问题,因此我们决定在 [Titanic " -"数据集](https://www.kaggle.com/competitions/titanic/data) 上添加一个简单的示例,并附上教程(在" -" README 中)。" - -#: ../../source/ref-changelog.md:280 -msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" -msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:282 -msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" -msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." +msgstr "聚合评估结果。" -#: ../../source/ref-changelog.md:284 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" - -#: ../../source/ref-changelog.md:286 -#, fuzzy -msgid "Add gRPC request-response capability to the Android SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" - -#: ../../source/ref-changelog.md:288 -#, fuzzy -msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" -msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
+msgstr "汇总训练结果。" -#: ../../source/ref-changelog.md:292 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:294 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Flower 默认使用 HTTPS。新的 \"flower-server \"需要通过\"--证书\",但用户可以启用\"--不安全 \"来使用 " -"HTTP 进行原型开发。这同样适用于 `flower-client`,它可以使用用户提供的凭证或 gRPC 绑定证书连接到支持 HTTPS " -"的服务器,也可以通过传递 `--insecure`来启用不安全的 HTTP 连接。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:296 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"为了向后兼容,`start_client()` 和 `start_numpy_client()` " -"默认仍以不安全模式启动。在未来的版本中,不安全连接将需要用户通过传递 `insecure=True` 进行选择。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:298 -msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" -msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." +msgstr "评估当前的模型参数。" -#: ../../source/ref-changelog.md:300 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " -"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." +msgstr "初始化(全局)模型参数。" -#: ../../source/ref-changelog.md:302 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -"**添加新**\"Bulyan " -"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891)" +"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, " +"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。" -#: ../../source/ref-changelog.md:304 -msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" -msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." +msgstr "服务器等待客户端更新时发生的异常。" -#: ../../source/ref-changelog.md:306 -#, fuzzy +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." +msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某种加权平均值。" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 -#, fuzzy +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" +"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, " +"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败" +" \"中都应该有一个 \"异常\"。" -#: ../../source/ref-changelog.md:314 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " +"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" -#: ../../source/ref-changelog.md:316 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" -msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." +msgstr "该函数可用于对模型参数进行集中(即服务器端)评估。" -#: ../../source/ref-changelog.md:318 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" -msgstr "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." 
+msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" -#: ../../source/ref-changelog.md:320 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" -msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" - -#: ../../source/ref-changelog.md:322 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" - -#: ../../source/ref-changelog.md:324 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" - -#: ../../source/ref-changelog.md:326 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" -msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" - -#: ../../source/ref-changelog.md:328 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." +msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" -#: ../../source/ref-changelog.md:330 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: ../../source/ref-api/flwr.server.workflow.rst:2 +#, fuzzy +msgid "workflow" +msgstr "工作流程" -#: ../../source/ref-changelog.md:332 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy +msgid "" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" +msgstr "" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" -#: ../../source/ref-changelog.md:334 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +#, fuzzy +msgid "Default workflow in Flower." +msgstr "Flower 中的默认工作流程。" -#: ../../source/ref-changelog.md:336 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" -#: ../../source/ref-changelog.md:338 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +#, fuzzy +msgid "The workflow for the SecAgg+ protocol." 
+msgstr "SecAgg+ 协议的工作流程。" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of #, fuzzy -msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" -msgstr "" -"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -" [#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446) " -"[#2561](https://github.com/adap/flower/pull/2561))" +msgid "The workflow for the SecAgg protocol." 
+msgstr "SecAgg 协议的工作流程。" -#: ../../source/ref-changelog.md:342 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 #, fuzzy -msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" -msgstr "" -"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448))" +msgid "DefaultWorkflow" +msgstr "工作流程" -#: ../../source/ref-changelog.md:344 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 #, fuzzy -msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" -msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +msgid "SecAggPlusWorkflow" +msgstr "工作流程" -#: ../../source/ref-changelog.md:346 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of #, fuzzy msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. 
This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -"**一般改进** ([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" - -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 -msgid "Flower received many improvements under the hood, too many to list here." 
-msgstr "Flower 进行了许多改进,这里就不一一列举了。" +"SecAgg+ " +"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," +" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " +"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" -#: ../../source/ref-changelog.md:352 -msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:354 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." -msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." +msgstr "" -#: ../../source/ref-changelog.md:356 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/ref-changelog.md:358 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of +#, fuzzy msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." -msgstr "" -"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " -"`transport=\"rest\"` 来选择使用试验性 REST API。" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "收集屏蔽向量\": 向目标客户端转发加密密钥共享,并收集屏蔽模型参数。" -#: ../../source/ref-changelog.md:360 -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.5.0 (2023-08-31)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of +#, fuzzy +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." 
+msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" -#: ../../source/ref-changelog.md:366 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of +#, fuzzy msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." +msgstr "只有聚合模型参数才会公开并传递给 `Strategy.aggregate_fit`,从而确保个人数据隐私。" -#: ../../source/ref-changelog.md:370 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of +#, fuzzy msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"在 SecAgg+ " +"协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模型更新的安全聚合。每个客户端向其每个邻居发送一份,同时保留一份。" -#: ../../source/ref-changelog.md:372 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of +#, fuzzy msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." -msgstr "" -"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " -"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." 
+msgstr "重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个客户的数据。" -#: ../../source/ref-changelog.md:374 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of +#, fuzzy msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." -msgstr "" -"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" -"run-simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "在服务器端进行加权平均计算(如 FedAvg 算法)时,可分配给任何单个客户端更新的权重的最大值。" -#: ../../source/ref-changelog.md:376 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of +#, fuzzy msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" -msgstr "" -"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " 
-"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." +msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, clipping_range] 范围内,便于量化。" -#: ../../source/ref-changelog.md:378 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of #, fuzzy msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." -msgstr "" -"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " -"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." +msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] 中的整数。这有助于对模型更新进行加密操作。" -#: ../../source/ref-changelog.md:380 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of +#, fuzzy msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." 
msgstr "" -"**介绍 Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +"对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range \"必须小于 " +"4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n 的值。" -#: ../../source/ref-changelog.md:382 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of +#, fuzzy msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." -msgstr "" -"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " -"和代码示例外,现在还有 iOS 快速入门教程。" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 \"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" -#: ../../source/ref-changelog.md:384 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of +#, fuzzy msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" -msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." +msgstr "一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" -#: ../../source/ref-changelog.md:386 -msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." -msgstr "" -"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " -"和代码示例,现在还有 Android 快速入门教程。" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +#, fuzzy +msgid "Too large `max_weight` may compromise the precision of the quantization." 
+msgstr "过大的 `max_weight` 可能会影响量化的精度。" -#: ../../source/ref-changelog.md:388 -msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" -msgstr "" -"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +#, fuzzy +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" -#: ../../source/ref-changelog.md:390 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of +#, fuzzy msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." -msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" - -#: ../../source/ref-changelog.md:392 -msgid "**Deprecate Python 3.7**" -msgstr "** 过时的 Python 3.7**" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." +msgstr "当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运行时确定。这样就可以根据参与客户端的总数进行动态调整。" -#: ../../source/ref-changelog.md:394 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of +#, fuzzy msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." 
-msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." +msgstr "" +"同样,当 `reconstruction_threshold` " +"为浮点数时,它被解释为重建私钥所需的份额数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" -#: ../../source/ref-changelog.md:396 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of +#, fuzzy msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," -" [#1853](https://github.com/adap/flower/pull/1853)" +"份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")在平衡 SecAgg+ " +"协议的隐私性、稳健性和效率方面发挥着关键作用。" -#: ../../source/ref-changelog.md:398 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" -#: ../../source/ref-changelog.md:400 -msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy +msgid "Execute the 'collect masked vectors' stage." +msgstr "执行 \"收集屏蔽向量 \"阶段。" -#: ../../source/ref-changelog.md:402 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." 
+":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " -"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " -"`start_driver` 的工作示例。" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:404 -msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" -msgstr "" -"为 `mt-pytorch` **代码示例**添加参数聚合 " -"([#1785](https://github.com/adap/flower/pull/1785))" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +#, fuzzy +msgid "Execute the 'setup' stage." +msgstr "执行 \"设置 \"阶段。" -#: ../../source/ref-changelog.md:406 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " -"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" -#: ../../source/ref-changelog.md:408 -msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" -msgstr "" -"**将实验性 REST API 移植到 Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +#, fuzzy +msgid "Execute the 'share keys' stage." +msgstr "执行 \"共享密钥 \"阶段。" -#: ../../source/ref-changelog.md:410 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " -"[Starlette](https://www.starlette.io/) 。" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +#, fuzzy +msgid "Execute the 'unmask' stage." +msgstr "执行 \"解除屏蔽 \"阶段。" + +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "工作流程" -#: ../../source/ref-changelog.md:412 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#, fuzzy msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." 
-msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +msgstr "" +"基础: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`." -#: ../../source/ref-changelog.md:414 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#, fuzzy msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -"**引入实验性 gRPC 请求-响应 API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901)" +"SecAgg " +"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," +" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " +"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" -#: ../../source/ref-changelog.md:416 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " -"应用程序接口,它使用请求-响应模型与客户端节点通信。" -#: ../../source/ref-changelog.md:418 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of +#, fuzzy msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." -msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." +msgstr "根据 SecAgg 协议,每个客户的私人密钥被分成 N 份,其中 N 是所选客户的数量。" -#: ../../source/ref-changelog.md:420 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of +#, fuzzy msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" -msgstr "" -"**用新的** `start_client(transport=\"rest\")` 替换实验性** " -"`start_client(rest=True)` " -"([#1880](https://github.com/adap/flower/pull/1880))" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." 
+msgstr "一般来说,\"重建阈值 \"越高,隐私保证就越好,但对丢包的容忍度就越低。" -#: ../../source/ref-changelog.md:422 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of +#, fuzzy msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" -" `transport`。过时的参数 `rest` 将在今后的版本中删除。" +"当 `reconstruction_threshold` " +"为浮点数时,它被解释为重建私钥所需的所有选定客户端数量的比例。此功能可根据所选客户端的数量灵活设置安全阈值。" -#: ../../source/ref-changelog.md:424 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of +#, fuzzy msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." +msgstr "" +"重构阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模量范围\")在 SecAgg " +"协议中平衡隐私性、鲁棒性和效率方面起着至关重要的作用。" -#: ../../source/ref-changelog.md:426 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " -"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" +":py:obj:`collect_masked_vectors_stage " +"`\\(driver\\," +" ...\\)" -#: ../../source/ref-changelog.md:428 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" - -#: ../../source/ref-changelog.md:430 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" 
+":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" +msgstr "" +":py:obj:`setup_stage " +"`\\(driver\\, context\\," +" state\\)" -#: ../../source/ref-changelog.md:432 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +"py:obj:`share_keys_stage " +"`\\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:434 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." 
+":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " -"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " -"\"requirements.txt\"(除 \"pyproject.toml \"外)。" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" + +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "运行模拟" -#: ../../source/ref-changelog.md:436 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +#, fuzzy +msgid "Run a Flower App using the Simulation Engine." +msgstr "使用模拟引擎运行花朵应用程序。" -#: ../../source/ref-changelog.md:450 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" -#: ../../source/ref-changelog.md:454 -msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." 
msgstr "" -"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " -"([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-changelog.md:456 -msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." -msgstr "" -"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " -"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "运行模拟" -#: ../../source/ref-changelog.md:458 +#: flwr.simulation.run_simulation.run_simulation:3 of +#, fuzzy msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." +msgstr "要执行的 `ServerApp`。它将向运行在不同(虚拟)超级节点上的不同 `ClientApp`实例发送消息。" -#: ../../source/ref-changelog.md:460 +#: flwr.simulation.run_simulation.run_simulation:6 of +#, fuzzy msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" -msgstr "" -"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " -"示例](https://github.com/adap/flower/tree/main/examples/ios)!" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "由每个超级节点执行的 `ClientApp`。它将接收由 `ServerApp` 发送的信息。" -#: ../../source/ref-changelog.md:462 +#: flwr.simulation.run_simulation.run_simulation:9 of +#, fuzzy msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" -msgstr "" -"**引入新的 " -"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721)" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." 
+msgstr "运行 ClientApp 的节点数。它们可被 ServerApp 中的驱动程序采样,并接收描述 ClientApp 应执行的操作的信息。" -#: ../../source/ref-changelog.md:464 +#: flwr.simulation.run_simulation.run_simulation:12 of +#, fuzzy +msgid "A simulation backend that runs `ClientApp`s." +msgstr "运行 \"客户端应用程序 \"的模拟后台。" + +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" -"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " -"之旅。请转发给对联邦学习感兴趣的人!" -#: ../../source/ref-changelog.md:466 +#: flwr.simulation.run_simulation.run_simulation:21 of +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +"布尔值,用于指示是否在主线程上启用 GPU 增长。如果您在 \"ServerApp \"上使用 TensorFlow 模型,同时让 " +"\"ClientApp \"在同一 GPU 上运行,则最好启用此选项。如果不启用此功能,您可能会遇到内存不足的错误,因为 TensorFlow " +"默认会分配所有 GPU 内存。有关 `tf.config.experimental.set_memory_growth()` " +"如何工作的更多信息,请参阅 TensorFlow 文档:https://www.tensorflow.org/api/stable。" -#: ../../source/ref-changelog.md:468 +#: flwr.simulation.run_simulation.run_simulation:28 of +#, fuzzy msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." -msgstr "" -"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " -"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " -"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." 
+msgstr "启用后,将只显示 INFO、WARNING 和 ERROR 日志信息。启用后,将显示 DEBUG 级日志。" -#: ../../source/ref-changelog.md:470 +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "start_simulation" + +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "更新日志" + +#: ../../source/ref-changelog.md:3 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" +msgstr "感谢我们的贡献者" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" -msgstr "" -"**引入新的 Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" -#: ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:9 +#, fuzzy msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " -"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:474 +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "可选的改进措施" + +#: ../../source/ref-changelog.md:13 +#, fuzzy msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:476 +#: ../../source/ref-changelog.md:15 +#, fuzzy msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." -msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:478 +#: ../../source/ref-changelog.md:17 +#, fuzzy msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
-msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:480 +#: ../../source/ref-changelog.md:19 +#, fuzzy msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:482 +#: ../../source/ref-changelog.md:21 +#, fuzzy msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." -msgstr "" -"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " -"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " -"服务器的内存效率。" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:484 +#: ../../source/ref-changelog.md:23 +#, fuzzy msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" -msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" +msgstr "" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:486 +#: ../../source/ref-changelog.md:25 +#, fuzzy msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 
🎉" -msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:488 -msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" -msgstr "" -"** 添加使用** `TabNet` ** 的新示例** " -"([#1725](https://github.com/adap/flower/pull/1725))" - -#: ../../source/ref-changelog.md:490 +#: ../../source/ref-changelog.md:27 +#, fuzzy msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-changelog.md:29 +#, fuzzy msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" +msgstr "不兼容的更改" + +#: ../../source/ref-changelog.md:35 +#, fuzzy +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:41 +#, fuzzy msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." -msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" +msgstr "有什么新内容?" -#: ../../source/ref-changelog.md:496 +#: ../../source/ref-changelog.md:45 msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -"**在模拟过程中为***`历史`***对象添加训练指标*** " -"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:498 +#: ../../source/ref-changelog.md:47 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " -"\"对象中。现在可以了!" 
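Editorial note on the `run_simulation` entries above: they list its main inputs as a `ServerApp`, a `ClientApp`, the number of (virtual) SuperNodes, and an optional `backend_config` dictionary with keys such as `client_resources`. The sketch below shows how those pieces are typically wired together in Python. It is illustrative only and not taken from this patch: the trivial client, the concrete values (`num_supernodes=10`, three rounds), and the exact `client_fn`/`server_fn` signatures are assumptions based on the Flower ~1.11 APIs referenced in this catalog.

```python
# Minimal sketch (assumed Flower ~1.11 APIs): run a ServerApp and a ClientApp
# with the Simulation Engine via `run_simulation`. Names not documented above
# (TrivialClient, client_fn, server_fn) are placeholders.
import numpy as np

from flwr.client import ClientApp, NumPyClient
from flwr.common import Context
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg
from flwr.simulation import run_simulation


class TrivialClient(NumPyClient):
    """Placeholder client: echoes parameters back and reports dummy metrics."""

    def get_parameters(self, config):
        return [np.zeros(3)]  # stand-in model weights

    def fit(self, parameters, config):
        return parameters, 1, {}  # (updated weights, num_examples, metrics)

    def evaluate(self, parameters, config):
        return 0.0, 1, {}  # (loss, num_examples, metrics)


def client_fn(context: Context):
    # Each virtual SuperNode constructs its ClientApp instance from this function.
    return TrivialClient().to_client()


def server_fn(context: Context) -> ServerAppComponents:
    # The ServerApp runs three FedAvg rounds in this sketch.
    return ServerAppComponents(strategy=FedAvg(), config=ServerConfig(num_rounds=3))


client_app = ClientApp(client_fn=client_fn)
server_app = ServerApp(server_fn=server_fn)

if __name__ == "__main__":
    run_simulation(
        server_app=server_app,
        client_app=client_app,
        num_supernodes=10,  # nodes that run the ClientApp, sampled by the ServerApp
        backend_config={"client_resources": {"num_cpus": 1, "num_gpus": 0.0}},
    )
```

In a deployment setting, the same `server_app`/`client_app` objects are what `flwr run` packages into a FAB and ships, via the SuperExec, to the SuperLink and SuperNodes described in the v1.11 notes above.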
-#: ../../source/ref-changelog.md:500 +#: ../../source/ref-changelog.md:49 msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." 
msgstr "" -"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." +msgstr "" -#: ../../source/ref-changelog.md:514 +#: ../../source/ref-changelog.md:53 +#, fuzzy msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:518 +#: ../../source/ref-changelog.md:55 msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " -"([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:520 +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" +msgstr "" + +#: ../../source/ref-changelog.md:59 msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " -"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " -"客户端节点可以决定是否要处理某个任务。" -#: ../../source/ref-changelog.md:522 +#: ../../source/ref-changelog.md:60 msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." 
msgstr "" -"**使Driver API 和Fleet " -"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/ref-changelog.md:524 +#: ../../source/ref-changelog.md:61 msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " -"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" -#: ../../source/ref-changelog.md:526 +#: ../../source/ref-changelog.md:63 #, fuzzy msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" - -#: ../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "支持 IPv4 和 IPv6 地址。" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:530 +#: ../../source/ref-changelog.md:65 msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:532 -msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" -"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " -"Flower 的联邦学习。您可以在这里找到它: [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)。" -#: ../../source/ref-changelog.md:534 +#: ../../source/ref-changelog.md:68 msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. 
`flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " -"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/ref-changelog.md:536 +#: ../../source/ref-changelog.md:69 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" -" 工具。" -#: ../../source/ref-changelog.md:538 +#: ../../source/ref-changelog.md:70 msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" +msgstr "" -#: ../../source/ref-changelog.md:540 +#: ../../source/ref-changelog.md:72 +#, fuzzy msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" -" `proximal_mu`的参数,使局部模型与全局模型正则化。" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:542 +#: ../../source/ref-changelog.md:74 msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" -msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." +msgstr "" -#: ../../source/ref-changelog.md:544 +#: ../../source/ref-changelog.md:76 +#, fuzzy msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." 
-msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" +msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:546 +#: ../../source/ref-changelog.md:78 msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" -msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." +msgstr "" -#: ../../source/ref-changelog.md:548 +#: ../../source/ref-changelog.md:80 +#, fuzzy msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:550 +#: ../../source/ref-changelog.md:82 msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" -msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." 
+msgstr "" -#: ../../source/ref-changelog.md:552 +#: ../../source/ref-changelog.md:84 +#, fuzzy msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:554 +#: ../../source/ref-changelog.md:86 msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" -"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github. 
com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github. com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github. com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" -#: ../../source/ref-changelog.md:558 +#: ../../source/ref-changelog.md:88 +#, fuzzy msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " 
+"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 -msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." +msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " +"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" +msgstr "停用" -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" +#: ../../source/ref-changelog.md:102 +#, fuzzy +msgid "" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:572 +#: ../../source/ref-changelog.md:104 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
-" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/ref-changelog.md:576 +#: ../../source/ref-changelog.md:113 +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -"**引入新的 Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:578 +#: ../../source/ref-changelog.md:115 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -"在未来几周内,我们将发布一些新的参考,特别是对 FL " -"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " -"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" -#: ../../source/ref-changelog.md:580 +#: ../../source/ref-changelog.md:117 +#, fuzzy msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" +msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:582 -msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
msgstr "" -"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " -"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" -#: ../../source/ref-changelog.md:584 +#: ../../source/ref-changelog.md:121 +#, fuzzy msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" -msgstr "" -"**改进 Jupyter Notebook 教程中的 GPU 支持** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:586 +#: ../../source/ref-changelog.md:123 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " -"笔记本进行了改进!点击这里查看更新后的笔记本:" -#: ../../source/ref-changelog.md:588 -msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" -"flower-pytorch.html)" -#: ../../source/ref-changelog.md:589 +#: ../../source/ref-changelog.md:135 +#, fuzzy msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" -msgstr "" -"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" -"learning-strategy-pytorch.html)" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:590 +#: ../../source/ref-changelog.md:137 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" -"scratch-pytorch.html)" -#: ../../source/ref-changelog.md:591 +#: ../../source/ref-changelog.md:139 +#, fuzzy +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" + +#: ../../source/ref-changelog.md:145 +#, fuzzy msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" -"the-client-pytorch.html)" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:593 +#: ../../source/ref-changelog.md:149 +#, fuzzy msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584)" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:595 +#: ../../source/ref-changelog.md:151 msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " -"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/ref-changelog.md:597 +#: ../../source/ref-changelog.md:153 +#, fuzzy msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." 
+"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " -"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:599 +#: ../../source/ref-changelog.md:155 msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:601 +#: ../../source/ref-changelog.md:157 +#, fuzzy msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." 
+"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " -"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " -"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:603 +#: ../../source/ref-changelog.md:159 msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" -" Flower 的用户来说尤其有用。" -#: ../../source/ref-changelog.md:605 +#: ../../source/ref-changelog.md:161 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" -msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." +msgstr "" -#: ../../source/ref-changelog.md:607 +#: ../../source/ref-changelog.md:163 +#, fuzzy msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" -msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" +msgstr "" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584)" -#: ../../source/ref-changelog.md:609 +#: ../../source/ref-changelog.md:165 msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." 
msgstr "" -"** 添加新的使用 Pandas " -"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535)" -#: ../../source/ref-changelog.md:611 +#: ../../source/ref-changelog.md:167 +#, fuzzy msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" -"/quickstart-pandas)。" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:613 +#: ../../source/ref-changelog.md:169 msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -"**添加新策略: Krum 和 MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/ref-changelog.md:615 +#: ../../source/ref-changelog.md:171 +#, fuzzy msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " -"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:617 +#: ../../source/ref-changelog.md:173 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " -"([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/ref-changelog.md:619 +#: ../../source/ref-changelog.md:175 +#, fuzzy msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." 
-msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" +msgstr "" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:621 +#: ../../source/ref-changelog.md:177 msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." msgstr "" -"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github. 
com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:625 +#: ../../source/ref-changelog.md:179 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:629 +#: ../../source/ref-changelog.md:181 msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." msgstr "" -"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" -"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" 
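The node config entries above describe a static, read-only configuration passed to the SuperNode and exposed to every `ClientApp` through `context.node_config`. A short sketch; the `partition-id` key is the one referenced later in the deprecation notes, and the exact CLI invocation is an assumption based on the space-separated config syntax described earlier:

```python
# The SuperNode might be started with an (assumed) node config such as:
#   flower-supernode --node-config "partition-id=0"
from flwr.client import Client, ClientApp, NumPyClient
from flwr.common import Context


def client_fn(context: Context) -> Client:
    # Node config is read-only and shared by every ClientApp on this SuperNode.
    partition_id = context.node_config["partition-id"]
    print(f"This ClientApp runs on partition {partition_id}")
    return NumPyClient().to_client()


app = ClientApp(client_fn=client_fn)
```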
- -#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" -#: ../../source/ref-changelog.md:639 +#: ../../source/ref-changelog.md:183 +#, fuzzy msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:641 +#: ../../source/ref-changelog.md:185 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" -#: ../../source/ref-changelog.md:645 +#: ../../source/ref-changelog.md:187 +#, fuzzy msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:647 +#: ../../source/ref-changelog.md:189 msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." 
msgstr "" -"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" -" Flower 文档,新的解释器会提供更多细节。" -#: ../../source/ref-changelog.md:649 +#: ../../source/ref-changelog.md:191 +#, fuzzy msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:651 +#: ../../source/ref-changelog.md:193 msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " -"SDK 组件,以及在 CoreML 上运行的一个任务示例。" -#: ../../source/ref-changelog.md:653 +#: ../../source/ref-changelog.md:195 +#, fuzzy msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" -msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:655 +#: ../../source/ref-changelog.md:197 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -"新的 \"FedMedian \"战略实现了[Yin " -"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" - -#: ../../source/ref-changelog.md:657 -msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" -msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:659 +#: ../../source/ref-changelog.md:199 +#, fuzzy msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." 
-msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" +msgstr "" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:661 +#: ../../source/ref-changelog.md:201 msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." +msgstr "" -#: ../../source/ref-changelog.md:663 +#: ../../source/ref-changelog.md:203 +#, fuzzy msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " -"\"float\",以允许分配分数资源。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:665 -msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" -msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "可选的改进措施" -#: ../../source/ref-changelog.md:667 +#: ../../source/ref-changelog.md:209 +#, fuzzy msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." 
-msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:669 +#: ../../source/ref-changelog.md:211 +#, fuzzy msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -"**使用** `flwr`向软件包提供类型信息 " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:671 +#: ../../source/ref-changelog.md:213 +#, fuzzy msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " -"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:673 +#: ../../source/ref-changelog.md:215 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" - -#: ../../source/ref-changelog.md:675 -msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." 
-msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/ref-changelog.md:677 -msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github. com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/ref-changelog.md:679 +#: ../../source/ref-changelog.md:221 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." -msgstr "文档更新的数量之多,甚至没有必要逐一列出。" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." +msgstr "" -#: ../../source/ref-changelog.md:681 +#: ../../source/ref-changelog.md:223 msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" -msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" +msgstr "" -#: ../../source/ref-changelog.md:683 +#: ../../source/ref-changelog.md:225 msgid "" -"The documentation has been restructured to make it easier to navigate. 
" -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" -msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." +msgstr "" -#: ../../source/ref-changelog.md:685 +#: ../../source/ref-changelog.md:229 +#, fuzzy msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" -msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:687 +#: ../../source/ref-changelog.md:231 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " -"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" -#: ../../source/ref-changelog.md:689 +#: ../../source/ref-changelog.md:233 +#, fuzzy msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:691 +#: ../../source/ref-changelog.md:235 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
msgstr "" -"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " -"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" - -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" - -#: ../../source/ref-changelog.md:699 -msgid "Highlights" -msgstr "亮点" - -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" - -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" - -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" -msgstr "可配置的`get_parameters`" -#: ../../source/ref-changelog.md:704 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" -msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" +#: ../../source/ref-changelog.md:237 +#, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:243 +#, fuzzy msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " -"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:247 +#, fuzzy msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " 
-"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." 
+"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:249 msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" -msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." +msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:251 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." 
msgstr "" -"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " -"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," -" client=FlowerClient())`)。" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:253 +#, fuzzy msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " -"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:255 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." 
msgstr "" -"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " -"`start_simulation`现在用一个类型为 " -"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " -"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:257 +#, fuzzy msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" -msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:259 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" -msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" - -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" - -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." 
+msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:261 +#, fuzzy msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" -msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:263 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " -"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/ref-changelog.md:265 +#, fuzzy +msgid "" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:736 +#: ../../source/ref-changelog.md:267 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -"**添加*** `server_round` ***到*** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:738 +#: ../../source/ref-changelog.md:269 +#, fuzzy msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." 
-msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" - -#: ../../source/ref-changelog.md:740 -msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:742 +#: ../../source/ref-changelog.md:271 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " -"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" -#: ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:273 +#, fuzzy msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:275 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"`RecordSet` is part of the Flower Next low-level API preview release. 
In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -"几个 Flower " -"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" -" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:277 +#, fuzzy msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -"**移动*** `flwr.dataset` **到*** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" - -#: ../../source/ref-changelog.md:750 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" - -#: ../../source/ref-changelog.md:752 -msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" -msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:279 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." -msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." +msgstr "" -#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:281 +#, fuzzy msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -"**重新命名** `Weights` **到** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:283 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." -msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. 
One example is a much improved SuperNode shutdown procedure." +msgstr "" -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:285 +#, fuzzy msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:762 +#: ../../source/ref-changelog.md:287 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -"start_server \"参数 \"force_final_distributed_eval " -"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:289 msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -"**使** `get_parameters` **可配置** " -"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:291 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." 
+"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " -"\"一样,都接受配置字典。" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:293 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:295 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " -"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:297 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" +msgstr "" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:299 +#, fuzzy msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." 
-msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" +msgstr "" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/ref-changelog.md:778 +#: ../../source/ref-changelog.md:301 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:303 +#, fuzzy msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" -msgstr "" -"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " -"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " -"`evaluate`!" - -#: ../../source/ref-changelog.md:782 -msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" -msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" - -#: ../../source/ref-changelog.md:784 -msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." 
-msgstr "" -"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " -"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" - -#: ../../source/ref-changelog.md:786 -msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -msgstr "" -"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" - -#: ../../source/ref-changelog.md:788 -msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" -msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" - -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" - -#: ../../source/ref-changelog.md:791 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" - -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch`" - -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" - -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" - -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow`" - -#: ../../source/ref-changelog.md:797 -msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" -msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" - -#: ../../source/ref-changelog.md:799 -msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" -msgstr "" -"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " -"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" - -#: ../../source/ref-changelog.md:801 -msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " 
+"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" "**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " "[#1209](https://github.com/adap/flower/pull/1209), " @@ -20992,514 +19609,919 @@ msgstr "" "[#1305](https://github.com/adap/flower/pull/1305), " "[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:305 +#, fuzzy +msgid "As always, Flower code examples have received many updates." +msgstr "许多 \"Flower \"代码示例得到了大幅更新。" + +#: ../../source/ref-changelog.md:307 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " -"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " -"`.md`,并修复了一些较小的细节!" 
-#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" -msgstr "**小规模更新**" +#: ../../source/ref-changelog.md:311 +#, fuzzy +msgid "**Deprecate Python 3.8 support**" +msgstr "** 过时的 Python 3.8**" -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:313 +#, fuzzy msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" -msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." +msgstr "由于 Python 3.8 已于 2024-10-01 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:315 +#, fuzzy msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " -"([#847](https://github.com/adap/flower/pull/847))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:317 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:319 +#, fuzzy msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259)" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" +#: ../../source/ref-changelog.md:321 +msgid "" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." 
+msgstr "" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:325 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/ref-changelog.md:818 -#, fuzzy +#: ../../source/ref-changelog.md:327 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -"Flower Baselines 的第一个预览版已经发布!我们通过实现 " -"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " -"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" -"/contributing-baselines.html)。" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:329 +#, fuzzy msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" -msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:331 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." 
msgstr "" -"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " -"演示了一个简单的 C++ 客户端。" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:333 +#, fuzzy msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:335 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " -"Python 版本的实验支持。" -#: ../../source/ref-changelog.md:828 -msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" -msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" +#: ../../source/ref-changelog.md:337 +#, fuzzy +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:343 +#, fuzzy msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " -"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:347 +#, fuzzy msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" -msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" +msgstr "" +"**介绍 Flower Next 高级应用程序接口(稳定版)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" -#: ../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:349 +#, fuzzy msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. 
If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " -"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" +"Flower Next 高级应用程序接口已经稳定!Flower Next 是 Flower 的未来 - 所有新功能(如 Flower " +"Mods)都将构建在它之上。您可以使用 `ServerApp` 和 `ClientApp` 开始将现有项目迁移到 Flower Next(请查看 " +"`quickstart-pytorch` 或 `quickstart-tensorflow` ,详细的迁移指南将在不久后发布)。Flower " +"Next 允许您同时运行多个项目(我们称之为多重运行),并在模拟环境或部署环境中执行同一项目,而无需更改任何代码。最棒的是什么?它与使用 " +"`Strategy`、`NumPyClient` 等的现有 Flower 项目完全兼容。" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:351 +#, fuzzy msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -"**允许在所有内置策略中同时使用联邦评价和集中评估** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:353 +#, fuzzy msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." 
msgstr "" -"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " -"`0.0`来禁用联邦评估。" +"除了使用 \"Strategy\"、\"NumPyClient \"等的 Flower Next 高级应用程序接口外,Flower 1.8 " +"还提供了新的 Flower Next " +"低级应用程序接口的预览版。低级应用程序接口允许通过向/从客户端节点发送/接收单个消息,对学习过程的各个方面进行细粒度控制。新的 " +"\"ServerApp \"支持注册一个自定义的 \"main \"函数,允许为异步FL、循环训练或联合分析等方法编写自定义训练循环。新的 " +"\"ClientApp \"支持注册 \"训练\"、\"评估 \"和 \"查询 \"函数,这些函数可以访问从 \"ServerApp " +"\"接收到的原始信息。新的抽象(如 \"RecordSet\"、\"Message \"和 " +"\"Context\")进一步支持发送多个模型、多套配置值和指标、客户端节点上的有状态计算以及自定义 SMPC 协议的实现等。" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:355 +#, fuzzy msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -"**两本新的 Jupyter Notebook 教程** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:357 +#, fuzzy msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" -msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." 
+msgstr "" +"Flower Modifiers(我们称之为 " +"Mods)可以拦截信息,并直接对其进行分析、编辑或处理。修改器可用于开发可在不同项目中使用的可插拔模块。Flower 1.8 " +"已经包含了记录信息大小、通过网络发送的参数数量、固定剪切和自适应剪切的差分隐私、本地差分隐私以及安全聚合协议 SecAgg 和 SecAgg+ 的" +" Mods。Flower Mods API 作为预览版发布,但研究人员已经可以用它来试验任意的 SMPC 协议。" -#: ../../source/ref-changelog.md:844 +#: ../../source/ref-changelog.md:359 +#, fuzzy msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -"*联邦学习简介*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:846 +#: ../../source/ref-changelog.md:361 +#, fuzzy msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
msgstr "" -"*在联邦学习中使用策略*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"我们将介绍 LLM FlowerTune,这是一个介绍性示例,演示了在 Alpaca-GPT4 数据集上对预先训练好的 Llama2 模型进行联合" +" LLM 微调。该示例可轻松调整以使用不同的模型和/或数据集。请阅读我们的博文 [LLM FlowerTune: Federated LLM " +"Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-" +"flowertune-federated-llm-finetuning-with-flower/) 了解更多详情。" -#: ../../source/ref-changelog.md:848 +#: ../../source/ref-changelog.md:363 +#, fuzzy msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:850 +#: ../../source/ref-changelog.md:365 +#, fuzzy msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." -msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." - -#: ../../source/ref-changelog.md:852 -msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" -msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" - -#: ../../source/ref-changelog.md:854 -msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." -msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" - -#: ../../source/ref-changelog.md:856 -msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143)" - -#: ../../source/ref-changelog.md:858 -msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." 
-msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" +"内置差分保密功能!Flower 支持中央和本地差分保密 (DP)。中央差分隐私可配置为固定或自适应剪切。剪切可以发生在服务器端或客户端。本地 DP" +" 在客户端进行剪切和噪声处理。新的文档页面[解释差分隐私方法](https://flower.ai/docs/framework" +"/explanation-differential-privacy.html) " +"和新的操作指南[如何使用新的差分隐私组件](https://flower.ai/docs/framework/how-to-use-" +"differential-privacy.html) 介绍了 Flower 的使用方法。" -#: ../../source/ref-changelog.md:862 +#: ../../source/ref-changelog.md:367 +#, fuzzy msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " -"运行([#1177](https://github.com/adap/flower/pull/1177))" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:369 +#, fuzzy msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." 
msgstr "" -"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " -"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" +"内置安全聚合功能!Flower 现在支持不同的安全聚合协议。最棒的是什么?只需几行代码,您就可以将安全聚合添加到 Flower " +"项目中。在这个初始版本中,我们包含了对 SecAgg 和 SecAgg+ " +"的支持,但更多协议将很快实现。我们还将添加详细的文档,解释安全聚合以及如何在 Flower 中使用它。您可以查看新的代码示例,了解如何使用 " +"Flower 在同一项目中轻松结合联合学习、差分隐私和安全聚合。" -#: ../../source/ref-changelog.md:864 +#: ../../source/ref-changelog.md:371 +#, fuzzy msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" -" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175)" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:373 +#, fuzzy msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" -msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." 
+msgstr "新的 `flwr` CLI 命令允许创建新的 Flower 项目(`flwr new`),然后使用仿真引擎运行它们(`flwr run`)。" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:375 +#, fuzzy msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" - -#: ../../source/ref-changelog.md:870 -msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:377 +#, fuzzy msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." +msgstr "" +"Flower 模拟引擎现在可以运行 Flower Next 项目。对于笔记本环境,还有一个新的 `run_simulation` 函数,可以运行 " +"`ServerApp` 和 `ClientApp`。" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:379 +#, fuzzy msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" -msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:381 +#, fuzzy msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." 
msgstr "" -"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " -"([#869](https://github.com/adap/flower/pull/869))" +"如果出现连接错误,超级节点现在会尝试无限期地重新连接超级链接。现在可以向 `flower-client-app` 命令传递参数 `-ax-" +"retries` 和 `-max-wait-time`。最大重试次数 \"将定义客户端在放弃重新连接超级链接之前的重试次数,而 \"最大等待时间 " +"\"则定义超级节点放弃重新连接超级链接之前的等待时间。" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:383 +#, fuzzy msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:385 +#, fuzzy msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -"**删除过时的 DefaultStrategy 策略** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"有一条新的 [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"基准线。其他几条基准线也已更新。" -#: ../../source/ref-changelog.md:876 +#: ../../source/ref-changelog.md:387 +#, fuzzy msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -"**删除已过时的对 eval_fn 返回值准确性的支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**改进文件和翻译** ([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " 
+"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:389 +#, fuzzy msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" msgstr "" -"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" - -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +"像往常一样,我们合并了许多对文档的较大和较小的改进。特别要感谢 [Sebastian van der " +"Voort](https://github.com/svdvoort),他为我们带来了一份重要的文档 PR!" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:391 +#, fuzzy msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:393 +#, fuzzy msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
msgstr "" -"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " -"`simulation` (`pip install 'flwr[simulation]'`)后,模拟(通过 `start_simulation` " -"使用虚拟客户端引擎)现在可以更流畅地运行。" +"两个新示例展示了视觉转换器(ViT)的联合训练,以及使用流行的 MONAI 库在医疗环境中进行的联合学习。quickstart-pytorch " +"\"和 \"quickstart-tensorflow \"展示了新的 Flower Next \"ServerApp \"和 " +"\"ClientApp\"。许多其他示例也得到了大量更新。" -#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:395 +#, fuzzy msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " 
+"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -"**新的 Jupyter Notebook 代码示例** " -"([#833](https://github.com/adap/flower/pull/833))" +"**一般改进**([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " 
+"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:401 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:407 +#, fuzzy msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " -"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:891 +#: ../../source/ref-changelog.md:411 +#, fuzzy msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" -msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" +msgstr "" +"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435))" -#: ../../source/ref-changelog.md:893 +#: ../../source/ref-changelog.md:413 +#, fuzzy msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." -msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." +msgstr "" +"客户端 \"和 \"NumPyClient \"的子类现在可以存储保留在客户端上的本地状态。让我们先从亮点开始:这一新功能与模拟客户端(通过 " +"`start_simulation`)和网络客户端(通过 `start_client`)兼容。这也是 `Context` 和 " +"`RecordSet` 等新抽象的首次预览。客户端可以通过 `state.RecordSet` 访问 `RecordSet` 类型的状态: " +"RecordSet = self.context.state`。对该 `RecordSet` " +"的更改会在不同轮执行中保留,以便在模拟和部署中以统一的方式进行有状态计算。" -#: ../../source/ref-changelog.md:895 +#: ../../source/ref-changelog.md:415 +#, fuzzy msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/ref-changelog.md:897 +#: ../../source/ref-changelog.md:417 +#, fuzzy msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." -msgstr "" -"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " -"Flower 就变得更容易了。" - -#: ../../source/ref-changelog.md:899 -msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. 
The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " -"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " -"`FedAvg`实现迈出的第一步。" +"Flower 的速度比以往更快。所有源于 `FedAvg` 的策略现在都使用就地聚合,以减少内存消耗。Flower " +"客户端序列化/解序列化已从头开始重写,从而显著提高了速度,尤其是在客户端训练时间较短的情况下。" -#: ../../source/ref-changelog.md:901 +#: ../../source/ref-changelog.md:419 +#, fuzzy msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " -"([#1069](https://github.com/adap/flower/pull/1069))" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:903 +#: ../../source/ref-changelog.md:421 +#, fuzzy msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " -"keepalive 时间,自定义 gRPC 堆栈。" +"通过新的 `quickstart-mlx` 代码示例,Flower 正式支持使用 [Apple MLX](https://ml-" +"explore.github.io/mlx)的联合学习。" -#: ../../source/ref-changelog.md:905 +#: ../../source/ref-changelog.md:423 +#, fuzzy msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"**使用 Opacus 和 PyTorch 的新差分隐私示例** " -"([#805](https://github.com/adap/flower/pull/805))" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:425 +#, fuzzy msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." -msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." 
+msgstr "" +"名为 `FedXgbCyclic` 的新策略支持逐个客户端的训练风格(通常称为循环)。xgboost-comprehensive " +"\"代码示例展示了如何在一个完整的项目中使用它。除此之外,`xgboost-comprehensive` 现在还支持模拟模式。由此,Flower " +"提供了同类最佳的 XGBoost 支持。" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:427 +#, fuzzy msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" -msgstr "" -"**新的Hugging Face Transformers代码示例** " -"([#863](https://github.com/adap/flower/pull/863))" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:911 +#: ../../source/ref-changelog.md:429 +#, fuzzy msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." -msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." +msgstr "框架测试现在可在 Python 3.8、3.9、3.10 和 3.11 上运行。这将确保为使用最新 Python 版本的用户提供更好的支持。" -#: ../../source/ref-changelog.md:913 +#: ../../source/ref-changelog.md:431 +#, fuzzy msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/ref-changelog.md:915 +#: ../../source/ref-changelog.md:433 +#, fuzzy msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." -msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
+msgstr "为提高安全性和性能,\"grpcio \"和 \"protobuf \"依赖项已更新至最新版本。" -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:435 +#, fuzzy msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" "** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " "[#844](https://github.com/adap/flower/pull/844), " @@ -21508,6707 +20530,12081 @@ msgstr "" "[#993](https://github.com/adap/flower/pull/993), " "[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:437 +#, fuzzy msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." -msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." +msgstr "" +"现在可以使用官方 Docker 映像运行 Flower 服务器了。新的操作指南介绍了 [如何使用 Docker 运行 " +"Flower](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html)。Flower 客户端 Docker 官方镜像将随后发布。" -#: ../../source/ref-changelog.md:921 +#: ../../source/ref-changelog.md:439 +#, fuzzy msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"**更新**`FedAdam`**和**`FedYogi`**战略** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:923 +#: ../../source/ref-changelog.md:441 +#, fuzzy msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." 
-msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:443 +#, fuzzy msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" -msgstr "" -"**初始化** `start_simulation` **使用客户端 ID 列表** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:445 +#, fuzzy msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " -"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " -"`int` 标识符访问的数据分区。" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:447 +#, fuzzy msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" -msgstr "" -"更新 PyTorch 代码示例中的 \"num_examples \"计算 " -"([#909](https://github.com/adap/flower/pull/909))" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
+msgstr "更新了多个代码示例,以使用 [Flower Datasets](https://flower.ai/docs/datasets/) 。" -#: ../../source/ref-changelog.md:932 +#: ../../source/ref-changelog.md:449 +#, fuzzy msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"通过 `flwr.__version__` 公开 Flower 版本 " -"([#952](https://github.com/adap/flower/pull/952))" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:933 -msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" -msgstr "" -"`app.py`中的 `start_server`现在会返回一个 `History` " -"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" +#: ../../source/ref-changelog.md:451 +#, fuzzy +msgid "Many Flower code examples received substantial updates." +msgstr "许多 \"Flower \"代码示例得到了大幅更新。" + +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" +msgstr "**更新 Flower Baselines**" -#: ../../source/ref-changelog.md:934 +#: ../../source/ref-changelog.md:455 +#, fuzzy msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"使 `max_workers`(由 " -"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:935 -msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" -msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" +#: ../../source/ref-changelog.md:456 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:936 -msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" -msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" - -#: ../../source/ref-changelog.md:937 -msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" -msgstr "还有更多底层更改、库更新、文档更改和工具改进!" 
+#: ../../source/ref-changelog.md:457 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:941 -msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" -msgstr "" -"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " -"([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/ref-changelog.md:458 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/ref-changelog.md:943 -msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." -msgstr "" -"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " -"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" +#: ../../source/ref-changelog.md:459 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +#: ../../source/ref-changelog.md:460 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:949 +#: ../../source/ref-changelog.md:462 +#, fuzzy msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:951 +#: ../../source/ref-changelog.md:464 +#, fuzzy msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." 
+"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " -"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " -"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" +"**改进测试和开发基础设施** ([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" -#: ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:466 +#, fuzzy msgid "" -"The 
feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." -msgstr "" -"该功能仍处于试验阶段,因此无法保证 API " -"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." +msgstr "Flower 测试和开发基础架构已得到大幅更新。这使得 Flower 1.7 成为有史以来经过最多测试的版本。" -#: ../../source/ref-changelog.md:955 +#: ../../source/ref-changelog.md:468 +#, fuzzy msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" - -#: ../../source/ref-changelog.md:957 -msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" - -#: ../../source/ref-changelog.md:958 -msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-changelog.md:960 +#: ../../source/ref-changelog.md:470 +#, fuzzy msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -"**新的 PyTorch Lightning 代码示例** " -"([#617](https://github.com/adap/flower/pull/617))" - -#: ../../source/ref-changelog.md:962 -msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" -msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" - -#: ../../source/ref-changelog.md:964 -msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" - -#: ../../source/ref-changelog.md:966 
-msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" -msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" - -#: ../../source/ref-changelog.md:970 -msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" -msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" +"**一般改进** ([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" -#: ../../source/ref-changelog.md:971 +#: ../../source/ref-changelog.md:474 +#, fuzzy msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"当 `min_available_clients` 配置错误时发出警告 " -"([#830](https://github.com/adap/flower/pull/830))" - -#: ../../source/ref-changelog.md:972 -msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" -msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/ref-changelog.md:973 +#: ../../source/ref-changelog.md:476 +#, fuzzy msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" +"Until now, clients of type `NumPyClient` needed to be started via " 
+"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." +msgstr "" +"到目前为止,\"NumPyClient \"类型的客户端需要通过 \"start_numpy_client \"启动。为了整合框架 " +"API,我们引入了一些变化,现在所有客户端类型都应通过 `start_client` 启动。要继续使用 `NumPyClient` " +"客户端,只需首先调用 `.to_client()` 方法,然后将返回的 `Client` 对象传递给 " +"`start_client`。示例和文档已相应更新。" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:478 +#, fuzzy msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" -msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:480 +#, fuzzy msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" -msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." +msgstr "传统的 DP 封装类已废弃,但仍可正常使用。这是为 Flower 中的全新可插拔差分隐私支持版本做准备。" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:482 +#, fuzzy msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" -" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" - -#: ../../source/ref-changelog.md:982 -msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" -msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/ref-changelog.md:984 +#: ../../source/ref-changelog.md:484 +#, fuzzy msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." 
+"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " -"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:486 +#, fuzzy msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:488 +#, fuzzy msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " -"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" +"从 `Task` 消息中删除了试验性字段 `sa`、 `legacy_server_message` 和 " +"`legacy_client_message`。删除的字段已被新的 `RecordSet` 抽象所取代。" -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" +#: ../../source/ref-changelog.md:490 +#, fuzzy +msgid "" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:492 +#, fuzzy msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." +msgstr "" +"MXNet fremework 的开发工作已经结束,该项目现已[归档于 " +"GitHub](https://github.com/apache/mxnet)。现有的 MXNet 示例不会收到更新。" -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" -msgstr "(摘要) FedOpt" +#: ../../source/ref-changelog.md:494 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:500 +#, fuzzy msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" -msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " +msgstr "" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:504 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " -"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" +"** 增加对 Python 3.12 的实验支持** " +"([#2565](https://github.com/adap/flower/pull/2565))" -#: ../../source/ref-changelog.md:1003 +#: ../../source/ref-changelog.md:506 +#, fuzzy msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " -"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " -"指标字典,以便服务器跟踪。" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:508 +#, fuzzy msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." 
msgstr "" -"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " -"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " -"`return loss, {\"accuracy\": accuracy}`。" +"我们添加了一个新的 \"xgboost-quickstart \"示例和一个新的 \"xgboost-comprehensive " +"\"示例,后者更加深入。" -#: ../../source/ref-changelog.md:1007 +#: ../../source/ref-changelog.md:510 +#, fuzzy msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." -msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:1009 +#: ../../source/ref-changelog.md:512 +#, fuzzy msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" -msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." +msgstr "" +"我们收到了许多关于使用 Flower 进行垂直联合学习的问题,因此我们决定在 [Titanic " +"数据集](https://www.kaggle.com/competitions/titanic/data) 上添加一个简单的示例,并附上教程(在" +" README 中)。" + +#: ../../source/ref-changelog.md:514 +msgid "" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" +msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" -#: ../../source/ref-changelog.md:1011 +#: ../../source/ref-changelog.md:516 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " -"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/ref-changelog.md:1013 +#: ../../source/ref-changelog.md:518 +#, fuzzy msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" -msgstr "MXNet 示例和文档" +#: ../../source/ref-changelog.md:520 +#, fuzzy +msgid "Add gRPC request-response capability to the Android SDK." 
+msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" -#: ../../source/ref-changelog.md:1017 +#: ../../source/ref-changelog.md:522 +#, fuzzy msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:1021 -msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" -msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." +msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" -#: ../../source/ref-changelog.md:1023 +#: ../../source/ref-changelog.md:526 +#, fuzzy msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " -"`Parameters` 类取代(例如在 `Strategy`中)。参数 " -"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-changelog.md:1025 +#: ../../source/ref-changelog.md:528 +#, fuzzy msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." 
msgstr "" -"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" -" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" +"Flower 默认使用 HTTPS。新的 \"flower-server \"需要通过\"--证书\",但用户可以启用\"--不安全 \"来使用 " +"HTTP 进行原型开发。这同样适用于 `flower-client`,它可以使用用户提供的凭证或 gRPC 绑定证书连接到支持 HTTPS " +"的服务器,也可以通过传递 `--insecure`来启用不安全的 HTTP 连接。" -#: ../../source/ref-changelog.md:1027 +#: ../../source/ref-changelog.md:530 +#, fuzzy msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -"已弃用 `flwr.server.Server.evaluate`,改用 " -"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" - -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" +"为了向后兼容,`start_client()` 和 `start_numpy_client()` " +"默认仍以不安全模式启动。在未来的版本中,不安全连接将需要用户通过传递 `insecure=True` 进行选择。" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:532 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" -msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:534 +#, fuzzy msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " -"\"方法进行。" +"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " +"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:536 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." 
+"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"内置策略支持名为 \"initial_parameters " -"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" +"**添加新**\"Bulyan " +"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891)" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:538 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." -msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:540 +#, fuzzy msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" -msgstr "" -"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " -"`flwr.server.strategy.FedAvg`)" - -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +#, fuzzy msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:548 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " -"返回几乎任意的值,并在服务器端使用它们!" 
+"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" -#: ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:550 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " -"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:552 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " -"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " -"`float, int, Dict[str, Scalar]`)。详见下面的示例。" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:554 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" -msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" - -#: ../../source/ref-changelog.md:1089 -msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" -msgstr "" -"**在**`Client.fit` " -"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" - -#: ../../source/ref-changelog.md:1091 -msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." -msgstr "" -"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。" - -#: ../../source/ref-changelog.md:1093 -msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " -"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" 
- -#: ../../source/ref-changelog.md:1095 -msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" -msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" - -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/ref-changelog.md:1116 -msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" -msgstr "改进文档" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:1118 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" -msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -#: ../../source/ref-changelog.md:1120 -msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:1121 -msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" -msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" -msgstr "错误修正:" +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -#: ../../source/ref-changelog.md:1125 +#: ../../source/ref-changelog.md:570 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." 
+"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " -"\"中处理的([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))。" - -#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" - -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" -msgstr "重要变更:" - -#: ../../source/ref-changelog.md:1131 -msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" -msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:1132 +#: ../../source/ref-changelog.md:572 +#, fuzzy msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"添加了一个新的 NumPyClient(除现有的 KerasClient " -"之外)([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508)" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:574 +#, fuzzy msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" - -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" - -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" -msgstr "不兼容的更改:" +"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " 
+"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +" [#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446) " +"[#2561](https://github.com/adap/flower/pull/2561))" -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:576 +#, fuzzy msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " -"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " -"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" - -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" - -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" - -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" - -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" +"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448))" -#: ../../source/ref-changelog.md:1147 +#: ../../source/ref-changelog.md:578 +#, fuzzy msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " -"。迁移时请使用 `FedAvg`。" - -#: ../../source/ref-changelog.md:1148 -msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." 
-msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:1149 +#: ../../source/ref-changelog.md:580 +#, fuzzy msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -"删除了策略界面中目前未使用的 " -"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" +"**一般改进** ([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " 
+"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" -#: ../../source/ref-changelog.md:1150 -msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." -msgstr "" -"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." +msgstr "Flower 进行了许多改进,这里就不一一列举了。" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:586 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"改进了 `Strategy` " -"docstrings([#470](https://github.com/adap/flower/pull/470))。" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" -msgstr "项目实例" +#: ../../source/ref-changelog.md:588 +msgid "" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." +msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:590 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " -"`PyTorch `_ 或 `TensorFlow " -"`_。" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/ref-example-projects.rst:10 -#, fuzzy +#: ../../source/ref-changelog.md:592 msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" -msgstr "以下示例可作为独立项目使用。" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." 
+msgstr "" +"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " +"`transport=\"rest\"` 来选择使用试验性 REST API。" -#: ../../source/ref-example-projects.rst:14 -msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" -msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.5.0 (2023-08-31)" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:600 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"`TensorFlow快速入门 (代码) `_" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-example-projects.rst:18 -#, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +#: ../../source/ref-changelog.md:604 +msgid "" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"`TensorFlow快速入门 (教程) `_" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-changelog.md:606 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -"`TensorFlow快速入门 (博客) `_" - -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" -msgstr "PyTorch快速入门" +"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " +"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:608 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" -msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
+msgstr "" +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" +"run-simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:610 msgid "" -"`Quickstart PyTorch (Code) " -"`_" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -"`PyTorch快速入门 (代码) `_" +"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " 
+"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-changelog.md:612 #, fuzzy -msgid ":doc:`Quickstart PyTorch (Tutorial) `" -msgstr "" -"`PyTorch快速入门 (教程) `_" - -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch: 从集中式到联邦式" - -#: ../../source/ref-example-projects.rst:35 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" -msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." +msgstr "" +"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " +"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-changelog.md:614 msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -"PyTorch: 从集中式到联邦式(代码) `_" +"**介绍 Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" -#: ../../source/ref-example-projects.rst:38 -#, fuzzy +#: ../../source/ref-changelog.md:616 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -"PyTorch: 从集中式到联邦式(教程) `_" - -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "树莓派和 Nvidia Jetson 上的联邦学习" +"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " +"和代码示例外,现在还有 iOS 快速入门教程。" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:618 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" -msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:620 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
msgstr "" -"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " -"`_" +"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " +"和代码示例,现在还有 Android 快速入门教程。" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:622 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " -"`_" +"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:624 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." +msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" -#: ../../source/ref-faq.rst -#, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" -msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" +msgstr "** 过时的 Python 3.7**" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:628 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." 
+msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:630 msgid "" -"`Flower simulation PyTorch " -"`_" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -"`Flower 模拟 PyTorch " -"`_" +"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," +" [#1853](https://github.com/adap/flower/pull/1853)" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:632 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -"`Flower模拟TensorFlow/Keras " -"`_" +"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" -msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" +#: ../../source/ref-changelog.md:634 +msgid "" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:636 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -"请点击此处查看有关嵌入式设备联邦学习的 " -"\"博文\"`_和相应的" -" \"GitHub 代码示例\"`_。" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" -msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" +"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " +"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " +"`start_driver` 的工作示例。" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:638 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" +"为 `mt-pytorch` **代码示例**添加参数聚合 " +"([#1785](https://github.com/adap/flower/pull/1785))" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:640 msgid "" -"`Android Kotlin example `_" -msgstr "`Android Kotlin 示例 `_" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." 
+msgstr "" +"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " +"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" -msgstr "Android Java 示例 `_" +#: ../../source/ref-changelog.md:642 +msgid "" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" +msgstr "" +"**将实验性 REST API 移植到 Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" -msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" +#: ../../source/ref-changelog.md:644 +msgid "" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." +msgstr "" +"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " +"[Starlette](https://www.starlette.io/) 。" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:646 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" -msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." +msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:648 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -"`Flower meets Nevermined GitHub Repository `_." +"**引入实验性 gRPC 请求-响应 API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901)" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:650 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -"`Flower meets Nevermined YouTube 视频 " -"`_." +"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " +"应用程序接口,它使用请求-响应模型与客户端节点通信。" -#: ../../source/ref-faq.rst:30 -#, fuzzy +#: ../../source/ref-changelog.md:652 msgid "" -"`Flower meets KOSMoS `_." -msgstr "" -"`Flower meets KOSMoS `_." +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." +msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:654 msgid "" -"`Flower meets Talan blog post `_ ." +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -"`Flower meets Talan博文 `_ 。" +"**用新的** `start_client(transport=\"rest\")` 替换实验性** " +"`start_client(rest=True)` " +"([#1880](https://github.com/adap/flower/pull/1880))" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:656 msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. 
" +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" +" `transport`。过时的参数 `rest` 将在今后的版本中删除。" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "遥测功能" +#: ../../source/ref-changelog.md:658 +msgid "" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:660 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " +"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:662 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." -msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "原则" - -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" -msgstr "我们遵循严格的匿名使用指标收集原则:" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/ref-telemetry.md:11 -msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." -msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:666 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." 
+"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " -"\"了解报告的指标。" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:668 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" -"being-reported)\"部分" +"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " +"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " +"\"requirements.txt\"(除 \"pyproject.toml \"外)。" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:670 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." 
-msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" +msgstr "" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "如何退出" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:684 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " -"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " -"服务器或客户端,只需在命令前添加以下内容即可:" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:688 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." 
+"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " -"Flower telemetry。" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "收集的指标" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "Flower 遥测技术收集以下指标:" +"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " +"([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:690 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." -msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." +msgstr "" +"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " +"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:692 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" -msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" +msgstr "" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:694 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." -msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. 
To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" +msgstr "" +"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " +"示例](https://github.com/adap/flower/tree/main/examples/ios)!" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:696 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." -msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" +msgstr "" +"**引入新的 " +"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721)" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:698 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." -msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" +msgstr "" +"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" +"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " +"之旅。请转发给对联邦学习感兴趣的人!" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:700 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " -"工作负载,而且还成功完成了它们。" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:702 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." 
+"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " -"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" +"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " +"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " +"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:704 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." -msgstr "" -"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" -" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" - -#: ../../source/ref-telemetry.md:46 -msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." -msgstr "" -"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" -"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" - -#: ../../source/ref-telemetry.md:48 -msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." -msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "如何检查报告中的内容" - -#: ../../source/ref-telemetry.md:52 -msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." -msgstr "" -"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " -"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " -"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" - -#: ../../source/ref-telemetry.md:58 -msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "如何联系我们" - -#: ../../source/ref-telemetry.md:66 -msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." 
+"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" -"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" - -#: ../../source/tutorial-quickstart-android.rst:-1 -msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" - -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" -msgstr "快速入门 Android" - -#: ../../source/tutorial-quickstart-android.rst:10 -msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" -msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" +"**引入新的 Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:706 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" - -#: ../../source/tutorial-quickstart-fastai.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" - -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "快速入门 fastai" - -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" -msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" +"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " +"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:708 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -"请参阅 `完整代码示例 `_了解更多信息。" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:710 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." 
-msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" - -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "🤗 Transformers快速入门" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." +msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:712 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" -msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." +msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:714 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " -"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "依赖关系" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:716 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." 
msgstr "" -"要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 :code:`flwr`、 " -":code:`torch`和 :code:`transformers`。这可以通过 :code:`pip` 来完成:" - -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "标准Hugging Face工作流程" - -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "处理数据" +"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " +"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " +"服务器的内存效率。" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:718 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" -msgstr "" -"为了获取 IMDB 数据集,我们将使用 Hugging Face 的 :code:`datasets` 库。然后,我们需要对数据进行标记化,并创建" -" :code:`PyTorch` 数据加载器,这些都将在 :code:`load_data` 函数中完成:" - -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "训练和测试模型" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" +msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:720 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" -msgstr "" -"有了创建 trainloader 和 testloader 的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` " -"训练或测试循环都非常相似:" - -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "创建模型本身" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" +msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:722 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -"要创建模型本身,我们只需使用 Hugging Face 的 :code:`AutoModelForSequenceClassification` " -"加载预训练的 distillBERT 模型:" - -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "将示例联邦化" - -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "创建 IMDBClient" +"** 添加使用** `TabNet` ** 的新示例** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:724 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
msgstr "" -"要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " -":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 :code:`PyTorch` 模型:" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)。" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:726 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." -msgstr "" -":code:`get_parameters` " -"函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" - -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "启动服务器" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:728 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" -msgstr "" -"现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " -":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " -":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" +"We now have a documentation guide to help users monitor their performance" +" during simulations." +msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:730 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -"使用 :code:`weighted_average` " -"函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" - -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "把所有东西放在一起" - -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "现在我们可以使用:" - -#: ../../source/tutorial-quickstart-huggingface.rst:221 -msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "他们就能连接到服务器,开始联邦训练。" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/tutorial-quickstart-huggingface.rst:223 -#, fuzzy +#: ../../source/ref-changelog.md:732 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" -"如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." - -#: ../../source/tutorial-quickstart-huggingface.rst:226 -msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." -msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" - -#: ../../source/tutorial-quickstart-huggingface.rst:229 -msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." -msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" - -#: ../../source/tutorial-quickstart-ios.rst:-1 -msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" - -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" -msgstr "快速入门 iOS" - -#: ../../source/tutorial-quickstart-ios.rst:10 -msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." -msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" +"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " +"\"对象中。现在可以了!" -#: ../../source/tutorial-quickstart-ios.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:734 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " -"`_ 中运行一切。对于在 iOS 中实现 " -"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" +"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " 
+"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" -#: ../../source/tutorial-quickstart-ios.rst:15 -msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." -msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/ref-changelog.md:748 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +msgstr "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-changelog.md:752 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. 
You" -" can do this by using pip:" -msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" - -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" -msgstr "或者Poetry:" - -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Flower 客户端" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" +msgstr "" +"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " +"([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-changelog.md:754 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " -"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " -"中实现并存储。客户端实现如下:" +"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " +"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " +"客户端节点可以决定是否要处理某个任务。" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/ref-changelog.md:756 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " -":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " -"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " -"`_ 以了解更多有关应用程序的信息。" - -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" -msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" +"**使Driver API 和Fleet " +"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-changelog.md:758 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. 
The preprocessing is done inside " -":code:`DataLoader.swift`." +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " -"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " -"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " -"中完成。" +"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " +"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-changelog.md:760 #, fuzzy msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " -"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" -#: ../../source/tutorial-quickstart-ios.rst:102 -#, fuzzy -msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." -msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "支持 IPv4 和 IPv6 地址。" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:764 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." -msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:766 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." 
msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " -":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " -"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" +"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " +"Flower 的联邦学习。您可以在这里找到它: [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)。" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Flower 服务器" - -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:768 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " -"Flower 并启动服务器:" - -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" -msgstr "联邦训练模型!" +"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:770 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" -msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." +msgstr "" +"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" +" 工具。" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:772 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." -msgstr "" -"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " -"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " -"`_。" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:774 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. 
The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可在 " -":code:`examples/ios` 中找到。" +"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" +" `proximal_mu`的参数,使局部模型与全局模型正则化。" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:776 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" - -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "快速入门 JAX" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" +msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:778 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" - -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "快速入门Pandas" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." +msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" +#: ../../source/ref-changelog.md:780 +msgid "" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" +msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:782 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -"请参阅 `完整代码示例 `_\" 了解更多信息。" +"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:784 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." 
-msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" +msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:786 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." -msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" +msgstr "" +"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 -#, fuzzy +#: ../../source/ref-changelog.md:788 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " 
+"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github. com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github. com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github. com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:792 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." -msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" +msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" 
-#: ../../source/tutorial-quickstart-pytorch.rst:23 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" -msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:806 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" -msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +msgstr "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:810 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch " -"的《Deep Learning with PyTorch " -"`_》。" +"**引入新的 Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:812 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" -msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" - -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "此外,我们还在 PyTorch 中定义了设备分配:" - -#: ../../source/tutorial-quickstart-pytorch.rst:62 -msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -"我们使用 PyTorch 来加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " -":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" +"在未来几周内,我们将发布一些新的参考,特别是对 FL " +"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/ref-changelog.md:814 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." 
-msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/ref-changelog.md:816 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." -msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." +msgstr "" +"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " +"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/ref-changelog.md:818 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." -msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" +msgstr "" +"**改进 Jupyter Notebook 教程中的 GPU 支持** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/ref-changelog.md:820 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" -msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" +msgstr "" +"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " +"笔记本进行了改进!点击这里查看更新后的笔记本:" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/ref-changelog.md:822 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." -msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" +msgstr "" +"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" +"flower-pytorch.html)" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-changelog.md:823 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." 
+"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" +"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" +"learning-strategy-pytorch.html)" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:824 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 PyTorch 时,它使 " -":code:`Client` 接口的实现变得更容易。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" - -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "以 NumPy ndarrays 列表形式返回模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr ":code:`set_parameters` (可选)" +"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" +"scratch-pytorch.html)" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:825 msgid "" -"update the local model weights with the parameters received from the " -"server" -msgstr "用从服务器接收到的参数更新本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "设置本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "训练本地模型" - -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "接收更新的本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "测试本地模型" - -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "可以通过以下方式实现:" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" +"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" +"the-client-pytorch.html)" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:827 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" +msgstr "" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " 
+"[#1584](https://github.com/adap/flower/pull/1584)" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -#, fuzzy +#: ../../source/ref-changelog.md:829 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " -":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " +"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:831 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" -msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" - -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "打开另一台终端,启动第二个客户端:" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." +msgstr "" +"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " +"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:833 msgid "" -"Each client will have its own dataset. 
You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" -msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" +msgstr "" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/ref-changelog.md:835 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/quickstart-pytorch` 中找到。" +"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " +"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " +"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:837 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." +msgstr "" +"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" +" Flower 的用户来说尤其有用。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "快速入门 PyTorch Lightning" +#: ../../source/ref-changelog.md:839 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" +msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/ref-changelog.md:841 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" -msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" 
+"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" +msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/ref-changelog.md:843 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -"请参阅 `完整代码示例 `_ 了解更多信息。" +"** 添加新的使用 Pandas " +"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/ref-changelog.md:845 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "scikit-learn快速入门" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." +msgstr "" +"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " +"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" +"/quickstart-pandas)。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:847 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " -"Regression` 模型。" +"**添加新策略: Krum 和 MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:849 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" +"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " +"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:851 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." 
-msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" +msgstr "" +"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " +"([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:853 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" -msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -#, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "或者直接使用 Poetry 安装所有依赖项:" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." +msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/ref-changelog.md:855 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " -":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr ":code:`get_model_parameters()`" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. 
com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" +#: ../../source/ref-changelog.md:859 +msgid "" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" +msgstr "" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" -msgstr ":code:`set_model_params()`" +#: ../../source/ref-changelog.md:863 +msgid "" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" +msgstr "" +"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" +"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -#, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "设置:code:`sklean`的LogisticRegression模型的参数" +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" -msgstr ":code:`set_initial_params()`" +#: ../../source/ref-changelog.md:873 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "初始化 Flower 服务器将要求的模型参数" +#: ../../source/ref-changelog.md:875 +msgid "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" +msgstr "" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/ref-changelog.md:879 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -"更多详情请查看 :code:`utils.py`` 这里 " -"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " -"还需要导入几个软件包,如 Flower 和 scikit-learn:" +"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 -#, fuzzy +#: ../../source/ref-changelog.md:881 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -"在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行 FL 分区。使用 " -"\"Flower Datasets " -"`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" -" 方法为 :code:`--partition-id` 参数中定义的每个分区 ID 加载分区训练集。" +"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" +" Flower 文档,新的解释器会提供更多细节。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:883 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." -msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:885 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." 
msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" +"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " +"SDK 组件,以及在 CoreML 上运行的一个任务示例。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:887 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" -msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " -"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "直接导入 :code:`utils.set_model_params()`" +#: ../../source/ref-changelog.md:889 +msgid "" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +msgstr "" +"新的 \"FedMedian \"战略实现了[Yin " +"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "这些方法可以通过以下方式实现:" +#: ../../source/ref-changelog.md:891 +msgid "" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/ref-changelog.md:893 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." +msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 -#, fuzzy +#: ../../source/ref-changelog.md:895 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." 
-msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -" :code:`server_address`。" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:897 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" -"learn。" +"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " +"\"float\",以允许分配分数资源。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" +#: ../../source/ref-changelog.md:899 +msgid "" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" +msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -#, fuzzy +#: ../../source/ref-changelog.md:901 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." -msgstr "" -"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " -"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." +msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:903 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
+"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " -":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " -"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " -":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" +"**使用** `flwr`向软件包提供类型信息 " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/ref-changelog.md:905 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" -msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." +msgstr "" +"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " +"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:907 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/ref-changelog.md:909 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" - -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "快速入门 TensorFlow" - -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "在导入 Flower 之前,我们必须先安装它:" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
+msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/ref-changelog.md:911 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" - -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" -msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" +msgstr "" +"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github. com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/ref-changelog.md:913 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." -msgstr "" -"我们使用 TF 的 Keras 实用程序加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " -":code:`tf.keras.datasets.cifar10.load_data()` 会下载 CIFAR10,将其缓存到本地,然后以 " -"NumPy ndarrays 的形式返回整个训练集和测试集。" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." +msgstr "文档更新的数量之多,甚至没有必要逐一列出。" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/ref-changelog.md:915 msgid "" -"Next, we need a model. 
For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" -msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/ref-changelog.md:917 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" -msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 Keras 时,该类可以更轻松地实现 " -":code:`Client` 接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" +msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "每个客户都有自己的数据集。" +#: ../../source/ref-changelog.md:919 +msgid "" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/ref-changelog.md:921 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" -msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." +msgstr "" +"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " +"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/ref-changelog.md:923 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " -"`_ 可以在 :code:`examples/quickstart-" -"tensorflow/client.py` 中找到。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/ref-changelog.md:925 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." 
-msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." +msgstr "" +"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " +"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "XGBoost快速入门" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" -msgstr "联邦化 XGBoost" +#: ../../source/ref-changelog.md:933 +msgid "Highlights" +msgstr "亮点" -#: ../../source/tutorial-quickstart-xgboost.rst:16 -msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." -msgstr "" -"EXtreme Gradient " -"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" -" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" -#: ../../source/tutorial-quickstart-xgboost.rst:20 -msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." -msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" -msgstr "为什么选择联邦 XGBoost?" +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" +msgstr "可配置的`get_parameters`" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/ref-changelog.md:938 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." -msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" +msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:942 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." 
+"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " -"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" +"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " +"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:944 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." -msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " -"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " -"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" -"comprehensive `_),以运行各种实验。" - -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" -msgstr "环境设定" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
+msgstr "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/ref-changelog.md:948 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" -msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/ref-changelog.md:950 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" +"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " +"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," +" client=FlowerClient())`)。" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:952 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." 
-msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" +msgstr "" +"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " +"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/ref-changelog.md:954 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" -msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" - -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" -msgstr "数据集划分和超参数选择" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." +msgstr "" +"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " +"`start_simulation`现在用一个类型为 " +"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " +"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:956 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" -msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:958 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" -msgstr "" -"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" -" :code:`node_id` 为给定客户端加载分区:" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" -#: ../../source/tutorial-quickstart-xgboost.rst:121 -msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." -msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" -#: ../../source/tutorial-quickstart-xgboost.rst:134 -msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" -msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." 
-msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/ref-changelog.md:964 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" + +#: ../../source/ref-changelog.md:966 +msgid "" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " -"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" +"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " +"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" -msgstr "用于 XGBoost 的 Flower 客户端定义" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:970 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " -":code:`XgbClient` 类。" +"**添加*** `server_round` ***到*** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:972 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." -msgstr "" -"代码:`self.bst`用于保存在各轮中保持一致的 Booster " -"对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." +msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:974 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." 
+"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " -":code:`evaluate` 方法如下。" +"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:976 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " -":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " -":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" +"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " +"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:978 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " -"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " -":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:980 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." 
msgstr "" -"给定 :code:`num_local_round`,我们通过调用 " -":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " -"树并发送给服务器。" +"几个 Flower " +"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" +" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:982 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." -msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" +msgstr "" +"**移动*** `flwr.dataset` **到*** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/tutorial-quickstart-xgboost.rst:294 -msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" -msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/ref-changelog.md:986 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." -msgstr "" -"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/ref-changelog.md:988 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." -msgstr "" -"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" -" FL。" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." +msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/ref-changelog.md:990 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " -"FedXgbBagging。" - -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." 
-msgstr "我们首先定义了 XGBoost bagging聚合策略。" +"**重新命名** `Weights` **到** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:992 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." -msgstr "" -"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " -"值并求取平均值。" - -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" -msgstr "然后,我们启动服务器:" - -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" -msgstr "基于树的bagging聚合" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." +msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:994 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." -msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" +"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:996 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " -":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " -":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" +"start_server \"参数 \"force_final_distributed_eval " +"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:998 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" -" 树:" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:1000 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." 
msgstr "" -"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " -"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" +"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " +"\"一样,都接受配置字典。" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:1002 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" - -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" -msgstr "启动联邦 XGBoost!" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" +"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:1004 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " -"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" +"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " +"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-changelog.md:1008 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." -msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" -msgstr "综合的联邦 XGBoost" +#: ../../source/ref-changelog.md:1010 +msgid "" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." +msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" -#: ../../source/tutorial-quickstart-xgboost.rst:596 -#, fuzzy +#: ../../source/ref-changelog.md:1012 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" -"comprehensive 示例 (`完整代码 " -"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" 
- -#: ../../source/tutorial-quickstart-xgboost.rst:603 -#, fuzzy -msgid "Cyclic training" -msgstr "集中式训练" +"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/tutorial-quickstart-xgboost.rst:605 -#, fuzzy +#: ../../source/ref-changelog.md:1014 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -"除了袋式聚合,我们还提供了一种循环训练方案,它以逐个客户端的方式执行 " -"FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " -"树将传递给下一个客户端,作为下一轮提升的初始化模型。" +"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " +"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " +"`evaluate`!" -#: ../../source/tutorial-quickstart-xgboost.rst:609 -#, fuzzy +#: ../../source/ref-changelog.md:1016 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" -msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-quickstart-xgboost.rst:649 -#, fuzzy +#: ../../source/ref-changelog.md:1018 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -"定制的 :code:`ClientManager` 会根据连接服务器的顺序,在每轮 FL 中对所有可用客户端进行采样。然后,我们在 " -":code:`flwr.server.strategy.fedxgb_cyclic.py`\"中定义了一个新策略 " -":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" +"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " +"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" -#: ../../source/tutorial-quickstart-xgboost.rst:690 -#, fuzzy +#: ../../source/ref-changelog.md:1020 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." 
+"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -"与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` " -"将接收到的客户端模型复制为全局模型。" +"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-quickstart-xgboost.rst:693 -#, fuzzy +#: ../../source/ref-changelog.md:1022 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" -msgstr "" -"此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL " -"轮中按顺序选择客户:" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" -msgstr "定制数据分区" +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" -#: ../../source/tutorial-quickstart-xgboost.rst:759 -msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." -msgstr "" -"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " -":code:`num_partitions` 和 :code:`partitioner_type` " -"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" -msgstr "定制的集中/分布式评估" +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch`" -#: ../../source/tutorial-quickstart-xgboost.rst:792 -#, fuzzy +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" + +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" + +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow`" + +#: ../../source/ref-changelog.md:1031 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" -msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:1033 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." 
+"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " -":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" +"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " +"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" -#: ../../source/tutorial-quickstart-xgboost.rst:827 -#, fuzzy +#: ../../source/ref-changelog.md:1035 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " -":code:`evaluate()` 方法。" - -#: ../../source/tutorial-quickstart-xgboost.rst:831 -#, fuzzy -msgid "Flower simulation" -msgstr "运行模拟" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/tutorial-quickstart-xgboost.rst:832 -#, fuzzy +#: ../../source/ref-changelog.md:1037 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." -msgstr "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower 的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" +msgstr "" +"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " +"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " +"`.md`,并修复了一些较小的细节!" 
-#: ../../source/tutorial-quickstart-xgboost.rst:866
-#, fuzzy
-msgid ""
-"After importing all required packages, we define a :code:`main()` "
-"function to perform the simulation process:"
-msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:"
+#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094
+#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202
+msgid "**Minor updates**"
+msgstr "**小规模更新**"
-#: ../../source/tutorial-quickstart-xgboost.rst:921
-#, fuzzy
-msgid ""
-"We first load the dataset and perform data partitioning, and the pre-"
-"processed data is stored in a :code:`list`. After the simulation begins, "
-"the clients won't need to pre-process their partitions again."
-msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。"
+#: ../../source/ref-changelog.md:1041
+msgid ""
+"Add round number to fit and evaluate log messages "
+"([#1266](https://github.com/adap/flower/pull/1266))"
+msgstr "在 fit 和 evaluate 日志信息中添加轮次编号([#1266](https://github.com/adap/flower/pull/1266))"
-#: ../../source/tutorial-quickstart-xgboost.rst:924
-#, fuzzy
-msgid "Then, we define the strategies and other hyper-parameters:"
-msgstr "然后,我们定义策略和其他超参数:"
+#: ../../source/ref-changelog.md:1042
+msgid ""
+"Add secure gRPC connection to the `advanced_tensorflow` code example "
+"([#847](https://github.com/adap/flower/pull/847))"
+msgstr ""
+"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 "
+"([#847](https://github.com/adap/flower/pull/847))"
-#: ../../source/tutorial-quickstart-xgboost.rst:975
-#, fuzzy
-msgid ""
-"After that, we start the simulation by calling "
-":code:`fl.simulation.start_simulation`:"
-msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:"
+#: ../../source/ref-changelog.md:1043
+msgid ""
+"Update developer tooling "
+"([#1231](https://github.com/adap/flower/pull/1231), "
+"[#1276](https://github.com/adap/flower/pull/1276), "
+"[#1301](https://github.com/adap/flower/pull/1301), "
+"[#1310](https://github.com/adap/flower/pull/1310))"
+msgstr ""
+"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), "
+"[#1276](https://github.com/adap/flower/pull/1276), "
+"[#1301](https://github.com/adap/flower/pull/1301), "
+"[#1310](https://github.com/adap/flower/pull/1310))"
+
+#: ../../source/ref-changelog.md:1044
+msgid ""
+"Rename ProtoBuf messages to improve consistency "
+"([#1214](https://github.com/adap/flower/pull/1214), "
+"[#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
+msgstr ""
+"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), "
+"[#1258](https://github.com/adap/flower/pull/1258), "
+"[#1259](https://github.com/adap/flower/pull/1259))"
+
+#: ../../source/ref-changelog.md:1046
+msgid "v0.19.0 (2022-05-18)"
+msgstr "v0.19.0 (2022-05-18)"
-#: ../../source/tutorial-quickstart-xgboost.rst:995
-#, fuzzy
-msgid ""
-"One of key parameters for :code:`start_simulation` is :code:`client_fn` "
-"which returns a function to construct a client. We define it as follows:"
-msgstr ""
-":code:`start_simulation` 的一个关键参数是 "
-":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:"
-#: ../../source/tutorial-quickstart-xgboost.rst:1038
-msgid "Arguments parser"
-msgstr "参数解析器"
-#: ../../source/tutorial-quickstart-xgboost.rst:1040
-#, fuzzy
-msgid ""
-"In :code:`utils.py`, we define the arguments parsers for clients, server "
-"and simulation, allowing users to specify different experimental "
-"settings. 
Let's first see the sever side:" -msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" +msgstr "" +"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#: ../../source/ref-changelog.md:1052 #, fuzzy msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" -"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" -msgstr "然后是客户端的参数解析器:" +"Flower Baselines 的第一个预览版已经发布!我们通过实现 " +"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " +"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" +"/contributing-baselines.html)。" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 -#, fuzzy +#: ../../source/ref-changelog.md:1054 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." -msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -#, fuzzy -msgid "We also have an argument parser for simulation:" -msgstr "我们还有一个用于模拟的参数解析器:" - -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -#, fuzzy -msgid "This integrates all arguments for both client and server sides." 
-msgstr "这整合了客户端和服务器端的所有参数。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" -msgstr "命令示例" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 -#, fuzzy +#: ../../source/ref-changelog.md:1056 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" -msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" - -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" -msgstr "然后,我们在每个客户终端上启动客户机:" - -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -#, fuzzy -msgid "To run the same experiment with Flower simulation:" -msgstr "运行与 Flower 模拟相同的实验:" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." +msgstr "" +"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " +"演示了一个简单的 C++ 客户端。" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -#, fuzzy +#: ../../source/ref-changelog.md:1058 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" -msgstr "从零开始制定策略" +"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " +"([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1060 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." msgstr "" -"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__)。" +"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " +"Python 版本的实验支持。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1062 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." 
-msgstr "" -"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " -"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" +msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:1064 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " -"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "让我们从头开始构建一个新的``Strategy``!" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "准备工作" +"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " +"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1066 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." 
-msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "安装依赖项" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "首先,我们安装必要的软件包:" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:1068 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" -msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." +msgstr "" +"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " +"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:1070 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
+"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "数据加载" +"**允许在所有内置策略中同时使用联邦评价和集中评估** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:1072 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " -"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "模型培训/评估" +"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " +"`0.0`来禁用联邦评估。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:1074 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" -msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" +msgstr "" +"**两本新的 Jupyter Notebook 教程** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "Flower 客户端" +#: ../../source/ref-changelog.md:1076 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:1078 msgid "" -"To implement the Flower client, 
we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " -"传递给客户端,并使用它记录其他详细信息:" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" +"*联邦学习简介*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "从零开始构建策略" +#: ../../source/ref-changelog.md:1080 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" +msgstr "" +"*在联邦学习中使用策略*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1082 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " -"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" +"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " +"([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:1084 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" -msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." +msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "回顾" +#: ../../source/ref-changelog.md:1086 +msgid "" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" +msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/ref-changelog.md:1088 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. 
To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." -msgstr "" -"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " -"``Strategy`` " -"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." +msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:1090 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" +"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:1092 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" -msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." +msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/ref-changelog.md:1096 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "自定义客户端" +"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " +"运行([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1097 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." 
+"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__),并从头开始构建了我们自己的定制策略(`part 3 " -"`__)。" +"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " +"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:1098 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " -"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" -" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" +"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" +" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:1099 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" -msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "步骤 0:准备工作" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:1100 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." 
+"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " -"``DataLoader`` 中。" +"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "步骤 1:重温 NumPyClient" +#: ../../source/ref-changelog.md:1104 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:1105 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" -msgstr "" -"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " -"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " -"``client_fn`` 的函数来创建该类的实例:" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:1106 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" -msgstr "" -"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " -"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:1107 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." -msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" +"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1108 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." 
+"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " -"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" +"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:1109 msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " -"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " -"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " -"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " -"只是建立在``Client``之上的便捷抽象类。" +"**删除过时的 DefaultStrategy 策略** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/ref-changelog.md:1110 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." -msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**删除已过时的对 eval_fn 返回值准确性的支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:1111 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." -msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 -msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" -msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:1117 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" -msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" 
+"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" +msgstr "" +"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1119 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " -"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " -"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " -"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" +"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " +"`simulation` (`pip install 'flwr[simulation]'`)后,模拟(通过 `start_simulation`" +" 使用虚拟客户端引擎)现在可以更流畅地运行。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/ref-changelog.md:1121 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " -"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " -"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" +"**新的 Jupyter Notebook 代码示例** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:1123 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " -"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " -"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" -" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "步骤 3:自定义序列化" +"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " +"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:1125 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." -msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:1127 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." -msgstr "" -"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" -" Python 对象。" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." +msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:1129 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." -msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:1131 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. 
This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` " -"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 " -"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "我们的定制序列化/反序列化功能" +"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " +"Flower 就变得更容易了。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/ref-changelog.md:1133 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_too_sparse_bytes`` 和用于反序列化的 " -"``sparse_bytes_too_ndarray`` 中。" +"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " +"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " +"`FedAvg`实现迈出的第一步。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 +#: ../../source/ref-changelog.md:1135 msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." -msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "客户端" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +msgstr "" +"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1137 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." -msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." +msgstr "" +"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " +"keepalive 时间,自定义 gRPC 堆栈。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:1139 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." 
+"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " -"序列化从网络中获取的参数。" +"**使用 Opacus 和 PyTorch 的新差分隐私示例** " +"([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:1141 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." -msgstr "" -"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " -"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." +msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:1143 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." -msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" +msgstr "" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "服务器端" +#: ../../source/ref-changelog.md:1145 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." +msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:1147 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " -"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "正如你所看到的,``evaluate``中只修改了一行:" +"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:1149 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." 
+msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "然后将汇总结果序列化:" +#: ../../source/ref-changelog.md:1151 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" +msgstr "" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" -msgstr "现在我们可以运行自定义序列化示例!" +#: ../../source/ref-changelog.md:1153 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." +msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:1155 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " -"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" +"**更新**`FedAdam`**和**`FedYogi`**战略** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +#: ../../source/ref-changelog.md:1157 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" -msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "阅读Flower文档 `__" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." 
+msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:1159 msgid "" -"`Check out Flower Code Examples " -"`__" -msgstr "查看 Flower 代码示例 `__" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" +msgstr "" +"**初始化** `start_simulation` **使用客户端 ID 列表** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/ref-changelog.md:1161 msgid "" -"`Use Flower Baselines for your research " -"`__" -msgstr "使用 \"Flower Baselines \"进行研究 `__" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." +msgstr "" +"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " +"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " +"`int` 标识符访问的数据分区。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +#: ../../source/ref-changelog.md:1165 msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "观看 2023 年Flower峰会视频 `__" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" +"更新 PyTorch 代码示例中的 \"num_examples \"计算 " +"([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "开始使用Flower" +#: ../../source/ref-changelog.md:1166 +msgid "" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" +msgstr "" +"通过 `flwr.__version__` 公开 Flower 版本 " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "欢迎阅读Flower联邦学习教程!" +#: ../../source/ref-changelog.md:1167 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +msgstr "" +"`app.py`中的 `start_server`现在会返回一个 `History` " +"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -#, fuzzy +#: ../../source/ref-changelog.md:1168 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " -"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" +"使 `max_workers`(由 " +"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" -msgstr "让我们开始吧!" 
+#: ../../source/ref-changelog.md:1169 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1170 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." -msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -#, fuzzy +#: ../../source/ref-changelog.md:1171 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" -msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" +msgstr "还有更多底层更改、库更新、文档更改和工具改进!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -#, fuzzy +#: ../../source/ref-changelog.md:1175 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "加载数据" +"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -#, fuzzy +#: ../../source/ref-changelog.md:1177 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." 
msgstr "" -"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " -"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" +"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " +"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" + +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/ref-changelog.md:1183 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " -"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" +"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -#, fuzzy +#: ../../source/ref-changelog.md:1185 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." +msgstr "" +"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " +"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " +"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -#, fuzzy +#: ../../source/ref-changelog.md:1187 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
msgstr "" -"现在,让我们从 ``flwr-datasets`` 中创建 Federated Dataset 抽象,以分割 " -"CIFAR-10。我们将为每个边缘设备创建小型训练集和测试集,并将它们分别封装到 PyTorch ``DataLoader`` 中:" +"该功能仍处于试验阶段,因此无法保证 API " +"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -#, fuzzy +#: ../../source/ref-changelog.md:1189 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " -"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " -"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/ref-changelog.md:1191 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +#: ../../source/ref-changelog.md:1192 msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." -msgstr "" -"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " -"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "步骤 1:使用 PyTorch 进行集中训练" +#: ../../source/ref-changelog.md:1194 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" +msgstr "" +"**新的 PyTorch Lightning 代码示例** " +"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:1196 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." 
-msgstr "" -"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" -" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " -"minute blitz " -"`__。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "定义模型" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:1198 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" -msgstr "" -"我们使用` PyTorch 教程 " -"`__ 中描述的简单 CNN:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "让我们继续进行常规的训练和测试功能:" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "训练模型" +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:1204 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" -msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/ref-changelog.md:1205 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " -"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" 
+"当 `min_available_clients` 配置错误时发出警告 " +"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "步骤 2:使用 Flower 联邦学习" +#: ../../source/ref-changelog.md:1206 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/ref-changelog.md:1207 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." -msgstr "" -"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " -"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "更新模型参数" +#: ../../source/ref-changelog.md:1208 +msgid "" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" +msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:1212 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." -msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:1214 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." 
msgstr "" -"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " -"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" +"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" +" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:1216 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" -msgstr "" -"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " -"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "实现 Flower 客户端" +#: ../../source/ref-changelog.md:1218 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." +msgstr "" +"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " +"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:1220 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " -"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " -"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" +"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:1222 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." 
msgstr "" -"为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和``evaluate`` 三个方法:" +"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " +"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters``: 返回当前本地模型参数" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-changelog.md:1228 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" -msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 -msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" -msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" +msgstr "(摘要) FedOpt" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/ref-changelog.md:1233 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" -msgstr "" -"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " -"客户端实现,它将一切都整合在一起:" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-changelog.md:1235 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." 
msgstr "" -"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " -"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " -"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " -"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " -"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " -"``FlowerClient.evaluate``)。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "使用虚拟客户端引擎" +"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " +"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +#: ../../source/ref-changelog.md:1237 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " -"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " -"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" +"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " +"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " +"指标字典,以便服务器跟踪。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/ref-changelog.md:1239 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." 
msgstr "" -"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " -"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " -"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " -"``evaluate`` 时,它就会调用 " -"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " -"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" +"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " +"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " +"`return loss, {\"accuracy\": accuracy}`。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "开始训练" +#: ../../source/ref-changelog.md:1241 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." +msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-changelog.md:1243 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." -msgstr "" -"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " -"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " -"``flwr.simulation.start_simulation`` 启动实际模拟。" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-changelog.md:1245 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -"函数 ``start_simulation`` 接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" -" (FedAvg)。" +"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " +"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/ref-changelog.md:1247 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. 
The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " -"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" +"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "幕后" +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "MXNet 示例和文档" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-changelog.md:1251 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " -"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" -" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" +"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/ref-changelog.md:1255 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." -msgstr "" -"然后,Flower 会要求选定的 10 " -"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "准确度在哪里找?" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-changelog.md:1257 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " -"float(准确度)}``去哪儿了?" +"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " +"`Parameters` 类取代(例如在 `Strategy`中)。参数 " +"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-changelog.md:1259 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 " -"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" +"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" +" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/ref-changelog.md:1261 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." 
+"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" -" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" +"已弃用 `flwr.server.Server.evaluate`,改用 " +"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 -msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" -msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/ref-changelog.md:1267 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" -msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/ref-changelog.md:1269 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " -"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" +"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " +"\"方法进行。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1271 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -"其他两类指标(`losses_centralized`` 和 " -"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" +"内置策略支持名为 \"initial_parameters " +"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "结束语" +#: ../../source/ref-changelog.md:1290 +msgid "" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." 
+msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-changelog.md:1294 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " -"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " -"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" +"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " +"`flwr.server.strategy.FedAvg`)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 -msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." -msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/ref-changelog.md:1300 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "使用联邦学习策略" +"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1302 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " -"`___)。" +"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " +"返回几乎任意的值,并在服务器端使用它们!" 
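A minimal sketch of the generalized `fit`/`evaluate` return values described in the entry above might look as follows; this is an illustrative example only, and the class name `FlwrClient`, the NumPy-array stand-in for a model, and the concrete metric keys and values are assumptions rather than part of the changelog:

```python
# Illustrative sketch: a NumPyClient whose fit/evaluate return an extra
# metrics dictionary (str keys mapping to bool/bytes/float/int/str values).
# The "model" is a plain NumPy array and the metric values are placeholders.
import flwr as fl
import numpy as np


class FlwrClient(fl.client.NumPyClient):
    def fit(self, parameters, config):
        # A real client would train on local data here; this sketch simply
        # echoes the received parameters back to the server.
        updated = [np.copy(layer) for layer in parameters]
        num_examples = 45
        metrics = {"train_loss": 0.12, "augmented": True}  # extra values for the server
        return updated, num_examples, metrics

    def evaluate(self, parameters, config):
        # A real client would evaluate the model on a local validation set.
        loss, num_examples = 0.34, 5
        metrics = {"accuracy": 0.9, "partition": "client-0"}  # str values are allowed too
        return loss, num_examples, metrics
```

On the server side, these dictionaries are what strategy methods such as `aggregate_fit`/`aggregate_evaluate` receive alongside the loss values and example counts, as described earlier in this changelog.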
-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1304 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " -"`PyTorch `__)。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "让我们超越 FedAvg,采用Flower策略!" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "策略定制" +"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " +"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-changelog.md:1306 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." -msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." +msgstr "" +"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " +"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " +"`float, int, Dict[str, Scalar]`)。详见下面的示例。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "服务器端参数 **初始化**" +#: ../../source/ref-changelog.md:1308 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/ref-changelog.md:1323 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" -" 允许您直接将初始参数传递给策略:" +"**在**`Client.fit` " +"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/ref-changelog.md:1325 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. 
If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " -"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " -"方法的任何调用。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "从定制战略开始" +"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/ref-changelog.md:1327 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" +"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " +"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/ref-changelog.md:1329 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " -"strategy this time:" -msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "服务器端参数**评估**" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-changelog.md:1350 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." -msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 -msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. 
We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." -msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" +msgstr "改进文档" + +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" + +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-changelog.md:1354 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." -msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-changelog.md:1355 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" -msgstr "" -"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " -"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "向/从客户端发送/接收任意值" +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" +msgstr "错误修正:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1359 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. 
Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " -"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " -"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " -"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " -"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" +"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " +"\"中处理的([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 -msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" -msgstr "" -"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " -"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" +msgstr "重要变更:" + +#: ../../source/ref-changelog.md:1365 msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" -msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-changelog.md:1366 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." 
+"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " -"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" +"添加了一个新的 NumPyClient(除现有的 KerasClient " +"之外)([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1367 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " -"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" -" ``evaluate`` 中的第三个返回值。" +"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "扩大联邦学习的规模" +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 -msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" +msgstr "不兼容的更改:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-changelog.md:1373 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " -"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " -"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " -"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" +"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " +"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " +"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 -msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 -msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" -msgstr "" -"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " -"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " -"个客户端!" +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." -msgstr "" -"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "什么是联邦学习?" +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#: ../../source/ref-changelog.md:1381 msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." 
msgstr "" -"在本教程中,你将了解什么是联邦学习,用 Flower " -"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" +"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " +"。迁移时请使用 `FedAvg`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-changelog.md:1382 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." -msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/ref-changelog.md:1383 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" -" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "经典机器学习" +"删除了策略界面中目前未使用的 " +"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/ref-changelog.md:1384 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." -msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" +"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/ref-changelog.md:1385 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." -msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" +"改进了 `Strategy` " +"docstrings([#470](https://github.com/adap/flower/pull/470))。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "模型和数据" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "项目实例" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-example-projects.rst:4 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" +"Flower comes with a number of usage examples. 
The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" +"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " +"`PyTorch `_ 或 `TensorFlow " +"`_。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "使用数据训练模型" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#: ../../source/ref-example-projects.rst:10 +#, fuzzy msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." -msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" +"The following examples are available as standalone projects. Quickstart " +"TensorFlow/Keras ---------------------------" +msgstr "以下示例可作为独立项目使用。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#: ../../source/ref-example-projects.rst:14 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." -msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" -msgstr "" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "手机上的数据" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/ref-example-projects.rst:17 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -"值得一提的是,这个 \"其他地方 " -"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" +"`TensorFlow快速入门 (代码) `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" +#: ../../source/ref-example-projects.rst:18 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "数据存在于多种设备中" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#: ../../source/ref-example-projects.rst:19 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." 
-msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" +"`TensorFlow快速入门 (博客) `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "集中数据收集" +#: ../../source/ref-example-projects.rst:23 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" +msgstr "PyTorch快速入门" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#: ../../source/ref-example-projects.rst:25 msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." -msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" +#: ../../source/ref-example-projects.rst:28 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" +"`PyTorch快速入门 (代码) `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "集中模型训练" +#: ../../source/ref-example-projects.rst:29 +#, fuzzy +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" +"`PyTorch快速入门 (教程) `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "经典机器学习面临的挑战" +#: ../../source/ref-example-projects.rst:33 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch: 从集中式到联邦式" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/ref-example-projects.rst:35 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." -msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#: ../../source/ref-example-projects.rst:37 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" +"PyTorch: 从集中式到联邦式(代码) `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "可集中管理" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/ref-example-projects.rst:38 +#, fuzzy msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." 
-msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|9980b5213db547d0b8024a50992b9e3f|" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" +"PyTorch: 从集中式到联邦式(教程) `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "无法集中" +#: ../../source/ref-example-projects.rst:42 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "树莓派和 Nvidia Jetson 上的联邦学习" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#: ../../source/ref-example-projects.rst:44 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" -msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#: ../../source/ref-example-projects.rst:46 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -"**法规**: " -"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" +"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " +"`_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/ref-example-projects.rst:47 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -"**用户偏好**: " -"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" +"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " +"`_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#: ../../source/ref-faq.rst:4 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. 
If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." -msgstr "" -"**数据量**: " -"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "集中式机器学习不起作用的例子包括:" +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#: ../../source/ref-faq.rst:8 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "用多家医院的敏感医疗记录训练癌症检测模型" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/ref-faq.rst:10 msgid "" -"Financial information from different organizations to detect financial " -"fraud" -msgstr "不同组织的财务信息,以侦查财务欺诈行为" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "通过电动汽车的定位数据更好地预测续航里程" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "端到端加密信息可训练出更好的自动完成模型" +"`Flower simulation PyTorch " +"`_" +msgstr "" +"`Flower 模拟 PyTorch " +"`_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#: ../../source/ref-faq.rst:11 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -"像 `Brave `__浏览器或 `Signal " -"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" +"`Flower模拟TensorFlow/Keras " +"`_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "联邦学习" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/ref-faq.rst:15 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. 
Here's the single-sentence " -"explanation:" -msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "集中式机器学习:将数据转移到计算中心" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" +"请点击此处查看有关嵌入式设备联邦学习的 " +"\"博文\"`_和相应的" +" \"GitHub 代码示例\"`_。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "联邦式(机器)学习:将计算转移到数据上" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/ref-faq.rst:19 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." -msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" +"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#: ../../source/ref-faq.rst:21 msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." -msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" +"`Android Kotlin example `_" +msgstr "`Android Kotlin 示例 `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "联邦学习的五个步骤" +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" +msgstr "Android Java 示例 `_" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "步骤 0:初始化全局模型" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/ref-faq.rst:26 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." -msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" +"Yes, of course. 
A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "初始化全局模型" +#: ../../source/ref-faq.rst:29 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 -msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" -msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" +#: ../../source/ref-faq.rst:30 +#, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "扩大联邦学习的规模" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#: ../../source/ref-faq.rst:31 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." -msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" +"`Flower meets Nevermined GitHub Repository `_." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "发送全局模型" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/ref-faq.rst:32 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" -msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" +"`Flower meets Nevermined YouTube 视频 " +"`_." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/ref-faq.rst:33 +#, fuzzy msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"`Flower meets KOSMoS `_." msgstr "" -"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" -"(mini-batches)。" +"`Flower meets KOSMoS `_." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#: ../../source/ref-faq.rst:34 +msgid "" +"`Flower meets Talan blog post `_ ." 
msgstr "" +"`Flower meets Talan博文 `_ 。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "根据本地数据进行训练" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "步骤 3:将模型参数更新返回服务器" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/ref-faq.rst:35 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." -msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" +"`Flower meets Talan GitHub Repository " +"`_ ." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "发送模型参数更新" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "步骤 4:将模型更新聚合到新的全局模型中" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "遥测功能" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +#: ../../source/ref-telemetry.md:3 msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " -"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" +"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#: ../../source/ref-telemetry.md:5 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." 
-msgstr "" -"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " -"*Federated Averaging* (`McMahan等人,2016 " -"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " -"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" -" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" -" 100 个示例的 10 倍。" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|329fb3c04c744eda83bb51fa444c2266|" -msgstr "" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "原则" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "聚合模型参数更新" +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "我们遵循严格的匿名使用指标收集原则:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +#: ../../source/ref-telemetry.md:12 msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." msgstr "" -"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " -"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" +"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " +"\"了解报告的指标。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +#: ../../source/ref-telemetry.md:13 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." 
+"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " -"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" +"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" +"being-reported)\"部分" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +#: ../../source/ref-telemetry.md:14 msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "如何退出" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" -" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" +"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " +"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " +"服务器或客户端,只需在命令前添加以下内容即可:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +#: ../../source/ref-telemetry.md:24 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." msgstr "" -"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" -" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" +"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " +"Flower telemetry。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "联邦分析" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "收集的指标" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "Flower 遥测技术收集以下指标:" + +#: ../../source/ref-telemetry.md:30 msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? 
Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +#: ../../source/ref-telemetry.md:32 msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " -"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" +"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " +"工作负载,而且还成功完成了它们。" + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). 
For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" +"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " +"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" +"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" +" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" +"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" +"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "如何检查报告中的内容" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" +"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " +"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " +"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "如何联系我们" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." +msgstr "" +"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" +"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." 
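A hedged sketch of the two telemetry switches described in the strings above, set from Python before Flower is imported; exporting the variables in the shell before launching, as the documentation describes, works the same way:

    import os

    # Opt out of telemetry and log locally what would otherwise be reported.
    # Set both variables before importing flwr so they take effect.
    os.environ["FLWR_TELEMETRY_ENABLED"] = "0"
    os.environ["FLWR_TELEMETRY_LOGGING"] = "1"

    import flwr  # imported only after the environment is configured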
+msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" + +#: ../../source/tutorial-quickstart-android.rst:5 +msgid "Quickstart Android" +msgstr "快速入门 Android" + +#: ../../source/tutorial-quickstart-android.rst:10 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" + +#: ../../source/tutorial-quickstart-android.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" +"请参阅`完整代码示例 " +"`_了解更多信息。" + +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" +msgstr "快速入门 fastai" + +#: ../../source/tutorial-quickstart-fastai.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:20 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "并激活虚拟环境:" + +#: ../../source/tutorial-quickstart-fastai.rst:43 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:56 +#: ../../source/tutorial-quickstart-huggingface.rst:65 +#: ../../source/tutorial-quickstart-mlx.rst:64 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56 +#: ../../source/tutorial-quickstart-pytorch.rst:64 +#: ../../source/tutorial-quickstart-tensorflow.rst:65 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:100 +#: ../../source/tutorial-quickstart-huggingface.rst:116 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106 +#: ../../source/tutorial-quickstart-pytorch.rst:105 +#: ../../source/tutorial-quickstart-tensorflow.rst:106 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:110 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#, fuzzy +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." 
+msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" + +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" +msgstr "🤗 Transformers快速入门" + +#: ../../source/tutorial-quickstart-huggingface.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-huggingface.rst:14 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-quickstart-mlx.rst:19 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-quickstart-tensorflow.rst:20 +#, fuzzy +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" + +#: ../../source/tutorial-quickstart-huggingface.rst:28 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 +msgid "" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:113 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#, fuzzy +msgid "The Data" +msgstr "加载数据" + +#: ../../source/tutorial-quickstart-huggingface.rst:132 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#, fuzzy +msgid "The Model" +msgstr "训练模型" + +#: ../../source/tutorial-quickstart-huggingface.rst:180 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" +"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " +"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" + +#: ../../source/tutorial-quickstart-huggingface.rst:193 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:196 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#, fuzzy +msgid "The ClientApp" +msgstr "客户端" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. 
As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:245 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:269 +#: ../../source/tutorial-quickstart-pytorch.rst:261 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:296 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:321 +#: ../../source/tutorial-quickstart-tensorflow.rst:245 +#, fuzzy +msgid "The ServerApp" +msgstr "服务器" + +#: ../../source/tutorial-quickstart-huggingface.rst:332 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:371 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:376 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." 
+msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" + +#: ../../source/tutorial-quickstart-ios.rst:5 +msgid "Quickstart iOS" +msgstr "快速入门 iOS" + +#: ../../source/tutorial-quickstart-ios.rst:10 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" + +#: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" +"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " +"`_ 中运行一切。对于在 iOS 中实现 " +"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" + +#: ../../source/tutorial-quickstart-ios.rst:15 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" + +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-ios.rst:21 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" +msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" + +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" +msgstr "或者Poetry:" + +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-xgboost.rst:55 +msgid "Flower Client" +msgstr "Flower 客户端" + +#: ../../source/tutorial-quickstart-ios.rst:36 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" +"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " +"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " +"中实现并存储。客户端实现如下:" + +#: ../../source/tutorial-quickstart-ios.rst:72 +msgid "" +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." 
+msgstr "" +"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " +":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " +"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " +"`_ 以了解更多有关应用程序的信息。" + +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" + +#: ../../source/tutorial-quickstart-ios.rst:83 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." +msgstr "" +"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " +"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " +"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " +"中完成。" + +#: ../../source/tutorial-quickstart-ios.rst:99 +#, fuzzy +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." +msgstr "" +"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " +"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" + +#: ../../source/tutorial-quickstart-ios.rst:102 +#, fuzzy +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." +msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" + +#: ../../source/tutorial-quickstart-ios.rst:117 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." +msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" + +#: ../../source/tutorial-quickstart-ios.rst:124 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." +msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " +":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " +"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" + +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-scikitlearn.rst:167 +#: ../../source/tutorial-quickstart-xgboost.rst:341 +msgid "Flower Server" +msgstr "Flower 服务器" + +#: ../../source/tutorial-quickstart-ios.rst:131 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. 
In a file named " +":code:`server.py`, import Flower and start the server:" +msgstr "" +"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " +"Flower 并启动服务器:" + +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +msgid "Train the model, federated!" +msgstr "联邦训练模型!" + +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-xgboost.rst:567 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" + +#: ../../source/tutorial-quickstart-ios.rst:152 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" +"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " +"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " +"`_。" + +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." +msgstr "" +"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可在 " +":code:`examples/ios` 中找到。" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" + +#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" +msgstr "快速入门 JAX" + +#: ../../source/tutorial-quickstart-jax.rst:10 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" +"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " +"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " +"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " +"`_" +" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" + +#: ../../source/tutorial-quickstart-jax.rst:16 +msgid "" +"Before we start building our JAX example, we need install the packages " +":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +msgstr "" +"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " +"和 :code:`flwr`:" + +#: ../../source/tutorial-quickstart-jax.rst:24 +msgid "Linear Regression with JAX" +msgstr "使用 JAX 进行线性回归" + +#: ../../source/tutorial-quickstart-jax.rst:26 +msgid "" +"We begin with a brief description of the centralized training code based " +"on a :code:`Linear Regression` model. If you want a more in-depth " +"explanation of what's going on then have a look at the official `JAX " +"documentation `_." 
+msgstr "" +"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " +"`JAX 文档 `_。" + +#: ../../source/tutorial-quickstart-jax.rst:29 +msgid "" +"Let's create a new file called :code:`jax_training.py` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " +"be imported. In addition, we need to import :code:`sklearn` since we use " +":code:`make_regression` for the dataset and :code:`train_test_split` to " +"split the dataset into a training and test set. You can see that we do " +"not yet import the :code:`flwr` package for federated learning. This will" +" be done later." +msgstr "" +"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " +"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " +":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " +"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" + +#: ../../source/tutorial-quickstart-jax.rst:43 +msgid "" +"The :code:`load_data()` function loads the mentioned training and test " +"sets." +msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" + +#: ../../source/tutorial-quickstart-jax.rst:53 +msgid "" +"The model architecture (a very simple :code:`Linear Regression` model) is" +" defined in :code:`load_model()`." +msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" + +#: ../../source/tutorial-quickstart-jax.rst:65 +msgid "" +"We now need to define the training (function :code:`train()`), which " +"loops over the training set and measures the loss (function " +":code:`loss_fn()`) for each batch of training examples. The loss function" +" is separate since JAX takes derivatives with a :code:`grad()` function " +"(defined in the :code:`main()` function and called in :code:`train()`)." +msgstr "" +"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " +":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " +"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" + +#: ../../source/tutorial-quickstart-jax.rst:83 +msgid "" +"The evaluation of the model is defined in the function " +":code:`evaluation()`. The function takes all test examples and measures " +"the loss of the linear regression model." +msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" + +#: ../../source/tutorial-quickstart-jax.rst:94 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the :code:`jax.grad()` function is defined in " +":code:`main()` and passed to :code:`train()`." +msgstr "" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " +"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " +":code:`train()`。" + +#: ../../source/tutorial-quickstart-jax.rst:111 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" + +#: ../../source/tutorial-quickstart-jax.rst:117 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." 
+msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" + +#: ../../source/tutorial-quickstart-jax.rst:121 +msgid "JAX meets Flower" +msgstr "JAX 结合 Flower" + +#: ../../source/tutorial-quickstart-jax.rst:123 +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +":code:`jax_training.py` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server*, which averages all received " +"parameter updates. This describes one round of the federated learning " +"process, and we repeat this for multiple rounds." +msgstr "" +"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " +":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" + +#: ../../source/tutorial-quickstart-jax.rst:145 +msgid "" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined JAX training in :code:`jax_training.py`. Our" +" *client* needs to import :code:`flwr`, but also :code:`jax` and " +":code:`jaxlib` to update the parameters on our JAX model:" +msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " +":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " +":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" + +#: ../../source/tutorial-quickstart-jax.rst:160 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " +"easier to implement than :code:`Client` if you use a framework with good " +"NumPy interoperability (like JAX) because it avoids some of the " +"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" +" to implement four methods, two methods for getting/setting model " +"parameters, one method for training the model, and one method for testing" +" the model:" +msgstr "" +"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " +"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " +":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid ":code:`set_parameters (optional)`" +msgstr ":code:`set_parameters (可选)`" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid "transform parameters to NumPy :code:`ndarray`'s" +msgstr "将参数转换为 NumPy :code:`ndarray`格式" + +#: ../../source/tutorial-quickstart-jax.rst:174 +msgid "get the updated local model parameters and return them to the server" +msgstr "获取更新后的本地模型参数并返回服务器" + +#: ../../source/tutorial-quickstart-jax.rst:178 +msgid "return the local loss to the server" +msgstr "向服务器返回本地损失值" + +#: ../../source/tutorial-quickstart-jax.rst:180 +msgid "" +"The challenging part is to transform the JAX model parameters from " +":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" +" `NumPyClient`." 
+msgstr "" +"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " +"`NumPyClient` 兼容。" + +#: ../../source/tutorial-quickstart-jax.rst:182 +msgid "" +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`evaluate()` previously " +"defined in :code:`jax_training.py`. So what we really do here is we tell " +"Flower through our :code:`NumPyClient` subclass which of our already " +"defined functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." +msgstr "" +"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " +":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" + +#: ../../source/tutorial-quickstart-jax.rst:251 +msgid "Having defined the federation process, we can run it." +msgstr "定义了联邦进程后,我们就可以运行它了。" + +#: ../../source/tutorial-quickstart-jax.rst:280 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" + +#: ../../source/tutorial-quickstart-jax.rst:285 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" +"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " +"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" + +#: ../../source/tutorial-quickstart-jax.rst:288 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" + +#: ../../source/tutorial-quickstart-mlx.rst:5 +#, fuzzy +msgid "Quickstart MLX" +msgstr "快速入门 JAX" + +#: ../../source/tutorial-quickstart-mlx.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-mlx.rst:12 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:27 +msgid "" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:57 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:106 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:122 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. 
In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:166 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:190 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:212 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:218 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:231 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:240 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:255 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:275 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:285 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:290 +#, fuzzy +msgid "Putting everything together we have:" +msgstr "把所有东西放在一起" + +#: ../../source/tutorial-quickstart-mlx.rst:344 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:378 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." 
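A hedged sketch of the ``get_params()``/``set_params()`` conversion described above, assuming an MLP whose linear layers sit under a ``layers`` key in ``model.parameters()`` (as in the MLX model these strings describe), with each layer contributing a ``weight`` and a ``bias`` array:

    import mlx.core as mx
    import numpy as np


    def get_params(model):
        # Flatten the per-layer {"weight": ..., "bias": ...} dicts into a flat list.
        layers = model.parameters()["layers"]
        return [np.array(val) for layer in layers for _, val in layer.items()]


    def set_params(model, parameters):
        # Re-pair the flat list as (weight, bias) per layer and load it back.
        new_params = {}
        new_params["layers"] = [
            {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])}
            for i in range(0, len(parameters), 2)
        ]
        model.update(new_params)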
+msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:402 +#: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:407 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" + +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" +msgstr "快速入门Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" + +#: ../../source/tutorial-quickstart-pandas.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"请参阅 `完整代码示例 `_\" 了解更多信息。" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" + +#: ../../source/tutorial-quickstart-pytorch.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-pytorch.rst:12 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:27 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:121 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:159 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:184 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. 
These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:236 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:294 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:323 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:365 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 +#, fuzzy +msgid "Video tutorial" +msgstr "教程" + +#: ../../source/tutorial-quickstart-pytorch.rst:376 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "快速入门 PyTorch Lightning" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." 
+msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" +msgstr "scikit-learn快速入门" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +msgid "" +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " +"Regression` 模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" +"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" +msgstr "或者直接使用 Poetry 安装所有依赖项:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" +msgstr "" +"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " +":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" +msgstr ":code:`get_model_parameters()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" +msgstr ":code:`set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +#, fuzzy +msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +msgstr "设置:code:`sklean`的LogisticRegression模型的参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid ":code:`set_initial_params()`" +msgstr ":code:`set_initial_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "初始化 Flower 服务器将要求的模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "" +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" +msgstr "" +"更多详情请查看 :code:`utils.py`` 这里 " +"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " +"还需要导入几个软件包,如 Flower 和 scikit-learn:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#, fuzzy +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +":code:`FederatedDataset.load_partition()` method loads the partitioned " +"training set for each partition ID defined in the :code:`--partition-id` " +"argument." +msgstr "" +"在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行 FL 分区。使用 " +"\"Flower Datasets " +"`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" +" 方法为 :code:`--partition-id` 参数中定义的每个分区 ID 加载分区训练集。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." 
+msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." +msgstr "" +"Flower 服务器通过一个名为 :code:`Client` " +"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" +" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " +"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " +"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "以 NumPy ndarrays 列表形式返回模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid ":code:`set_parameters` (optional)" +msgstr ":code:`set_parameters` (可选)" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "用从服务器接收到的参数更新本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:122 +msgid "is directly imported with :code:`utils.set_model_params()`" +msgstr "直接导入 :code:`utils.set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +msgid "set the local model weights" +msgstr "设置本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +msgid "train the local model" +msgstr "训练本地模型" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +#, fuzzy +msgid "return the updated local model weights" +msgstr "接收更新的本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +msgid "test the local model" +msgstr "测试本地模型" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "The methods can be implemented in the following way:" +msgstr "这些方法可以通过以下方式实现:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +msgid "" +"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" +msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " +"implement a client of type :code:`NumPyClient` you'll need to first call " +"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" +" the client which server to connect to. In our case we can run the server" +" and the client on the same machine, therefore we use " +":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we pass to the client." 
+msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " +":code:`fl.client.start_client()` 或 " +":code:`fl.client.start_numpy_client()`。字符串 " +":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +" :code:`server_address`。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" +"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" +"learn。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:172 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "" +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy. Note that we also make use of Flower" +" Datasets here to load the test split of the MNIST dataset for server-" +"side evaluation." +msgstr "" +"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " +"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +msgid "" +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +msgstr "" +":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " +":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " +"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " +":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:239 +#: ../../source/tutorial-quickstart-xgboost.rst:575 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:246 +#: ../../source/tutorial-quickstart-xgboost.rst:582 +msgid "Open another terminal and start the second client:" +msgstr "打开另一台终端,启动第二个客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:252 +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "" +"Each client will have its own dataset. 
You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." +msgstr "" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#, fuzzy +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" +msgstr "快速入门 TensorFlow" + +#: ../../source/tutorial-quickstart-tensorflow.rst:7 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:28 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:118 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:147 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:178 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:212 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. 
Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:247 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:284 +#, fuzzy +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:299 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" +msgstr "XGBoost快速入门" + +#: ../../source/tutorial-quickstart-xgboost.rst:14 +msgid "Federated XGBoost" +msgstr "联邦化 XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:16 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" +"EXtreme Gradient " +"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" +" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" + +#: ../../source/tutorial-quickstart-xgboost.rst:20 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" + +#: ../../source/tutorial-quickstart-xgboost.rst:23 +msgid "Why federated XGBoost?" +msgstr "为什么选择联邦 XGBoost?" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." 
+msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" +"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " +"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" + +#: ../../source/tutorial-quickstart-xgboost.rst:30 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " +"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " +"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" +"comprehensive `_),以运行各种实验。" + +#: ../../source/tutorial-quickstart-xgboost.rst:37 +msgid "Environment Setup" +msgstr "环境设定" + +#: ../../source/tutorial-quickstart-xgboost.rst:39 +#, fuzzy +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-xgboost.rst:41 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" + +#: ../../source/tutorial-quickstart-xgboost.rst:47 +msgid "" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" +msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" + +#: ../../source/tutorial-quickstart-xgboost.rst:60 +msgid "" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" +msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:87 +msgid "Dataset partition and hyper-parameter selection" +msgstr "数据集划分和超参数选择" + +#: ../../source/tutorial-quickstart-xgboost.rst:89 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" + +#: ../../source/tutorial-quickstart-xgboost.rst:102 +#, fuzzy +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=30)`). 
Then, we load " +"the partition for the given client based on :code:`partition_id`:" +msgstr "" +"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" +" :code:`node_id` 为给定客户端加载分区:" + +#: ../../source/tutorial-quickstart-xgboost.rst:121 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." +msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" + +#: ../../source/tutorial-quickstart-xgboost.rst:134 +msgid "" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" +msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:158 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." +msgstr "" +"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " +"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" + +#: ../../source/tutorial-quickstart-xgboost.rst:181 +msgid "Flower client definition for XGBoost" +msgstr "用于 XGBoost 的 Flower 客户端定义" + +#: ../../source/tutorial-quickstart-xgboost.rst:183 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." +msgstr "" +"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " +":code:`XgbClient` 类。" + +#: ../../source/tutorial-quickstart-xgboost.rst:205 +msgid "" +"All required parameters defined above are passed to :code:`XgbClient`'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:207 +msgid "" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." +msgstr "" +"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " +":code:`evaluate` 方法如下。" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." +msgstr "" +"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " +":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " +":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" + +#: ../../source/tutorial-quickstart-xgboost.rst:262 +#, fuzzy +msgid "" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. 
From the second round, we load the global "
+"model sent from the server into a newly built Booster object, and then "
+"update the model weights on local training data with the "
+":code:`local_boost` function as follows:"
+msgstr ""
+"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config "
+"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 "
+":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:281
+#, fuzzy
+msgid ""
+"Given :code:`num_local_round`, we update trees by calling the "
+":code:`bst_input.update` method. After training, the last "
+":code:`N=num_local_round` trees will be extracted to send to the server."
+msgstr ""
+"给定 :code:`num_local_round`,我们通过调用 "
+":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` "
+"树并发送给服务器。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:313
+#, fuzzy
+msgid ""
+"In :code:`evaluate`, after loading the global model, we call the "
+":code:`bst.eval_set` function to conduct evaluation on the validation "
+"set. The AUC value will be returned."
+msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:316
+msgid ""
+"Now, we can create an instance of our class :code:`XgbClient` and add one"
+" line to actually run this client:"
+msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:332
+#, fuzzy
+msgid ""
+"That's it for the client. We only have to implement :code:`Client` and "
+"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` "
+"tells the client which server to connect to. In our case we can run the "
+"server and the client on the same machine, therefore we use "
+":code:`\"[::]:8080\"`. If we run a truly federated workload with the "
+"server and clients running on different machines, all that needs to "
+"change is the :code:`server_address` we point the client at."
+msgstr ""
+"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 "
+":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 "
+":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 "
+":code:`server_address`。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:343
+msgid ""
+"These updates are then sent to the *server* which will aggregate them to "
+"produce a better model. Finally, the *server* sends this improved version"
+" of the model back to each *client* to finish a complete FL round."
+msgstr ""
+"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的"
+" FL。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:346
+msgid ""
+"In a file named :code:`server.py`, import Flower and FedXgbBagging from "
+":code:`flwr.server.strategy`."
+msgstr ""
+"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 "
+"FedXgbBagging。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:348
+msgid "We first define a strategy for XGBoost bagging aggregation."
+msgstr "我们首先定义了 XGBoost bagging聚合策略。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:380
+#, fuzzy
+msgid ""
+"We use two clients for this example. An "
+":code:`evaluate_metrics_aggregation` function is defined to collect and "
+"compute the weighted average of the AUC values from the clients. The "
+":code:`config_func` function returns the current FL round number to the "
+"client's :code:`fit()` and :code:`evaluate()` methods."
+msgstr "" +"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " +"值并求取平均值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:384 +msgid "Then, we start the server:" +msgstr "然后,我们启动服务器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:396 +msgid "Tree-based bagging aggregation" +msgstr "基于树的bagging聚合" + +#: ../../source/tutorial-quickstart-xgboost.rst:398 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" + +#: ../../source/tutorial-quickstart-xgboost.rst:400 +msgid "" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" +msgstr "" +"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " +":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " +":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:496 +msgid "" +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" +msgstr "" +"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" +" 树:" + +#: ../../source/tutorial-quickstart-xgboost.rst:555 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" +"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " +"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:560 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" + +#: ../../source/tutorial-quickstart-xgboost.rst:565 +msgid "Launch Federated XGBoost!" +msgstr "启动联邦 XGBoost!" + +#: ../../source/tutorial-quickstart-xgboost.rst:641 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." +msgstr "" +"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " +"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" + +#: ../../source/tutorial-quickstart-xgboost.rst:646 +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-xgboost.rst:650 +msgid "Comprehensive Federated XGBoost" +msgstr "综合的联邦 XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:652 +#, fuzzy +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. 
We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" +msgstr "" +"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" +"comprehensive 示例 (`完整代码 " +"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" + +#: ../../source/tutorial-quickstart-xgboost.rst:659 +#, fuzzy +msgid "Cyclic training" +msgstr "集中式训练" + +#: ../../source/tutorial-quickstart-xgboost.rst:661 +#, fuzzy +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" +"除了袋式聚合,我们还提供了一种循环训练方案,它以逐个客户端的方式执行 " +"FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " +"树将传递给下一个客户端,作为下一轮提升的初始化模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:665 +#, fuzzy +msgid "" +"To do this, we first customise a :code:`ClientManager` in " +":code:`server_utils.py`:" +msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" + +#: ../../source/tutorial-quickstart-xgboost.rst:705 +#, fuzzy +msgid "" +"The customised :code:`ClientManager` samples all available clients in " +"each FL round based on the order of connection to the server. Then, we " +"define a new strategy :code:`FedXgbCyclic` in " +":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" +"定制的 :code:`ClientManager` 会根据连接服务器的顺序,在每轮 FL 中对所有可用客户端进行采样。然后,我们在 " +":code:`flwr.server.strategy.fedxgb_cyclic.py`\"中定义了一个新策略 " +":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" + +#: ../../source/tutorial-quickstart-xgboost.rst:746 +#, fuzzy +msgid "" +"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding :code:`aggregate_fit`." +msgstr "" +"与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` " +"将接收到的客户端模型复制为全局模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:749 +#, fuzzy +msgid "" +"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" +" methods ensure the clients to be sequentially selected given FL round:" +msgstr "" +"此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL " +"轮中按顺序选择客户:" + +#: ../../source/tutorial-quickstart-xgboost.rst:813 +msgid "Customised data partitioning" +msgstr "定制数据分区" + +#: ../../source/tutorial-quickstart-xgboost.rst:815 +msgid "" +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." 
+msgstr "" +"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " +":code:`num_partitions` 和 :code:`partitioner_type` " +"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" + +#: ../../source/tutorial-quickstart-xgboost.rst:846 +msgid "Customised centralised/distributed evaluation" +msgstr "定制的集中/分布式评估" + +#: ../../source/tutorial-quickstart-xgboost.rst:848 +#, fuzzy +msgid "" +"To facilitate centralised evaluation, we define a function in " +":code:`server_utils.py`:" +msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:880 +msgid "" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." +msgstr "" +"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " +":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:883 +#, fuzzy +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client_utils.py`." +msgstr "" +"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " +":code:`evaluate()` 方法。" + +#: ../../source/tutorial-quickstart-xgboost.rst:887 +#, fuzzy +msgid "Flower simulation" +msgstr "运行模拟" + +#: ../../source/tutorial-quickstart-xgboost.rst:888 +#, fuzzy +msgid "" +"We also provide an example code (:code:`sim.py`) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower 的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" + +#: ../../source/tutorial-quickstart-xgboost.rst:922 +#, fuzzy +msgid "" +"After importing all required packages, we define a :code:`main()` " +"function to perform the simulation process:" +msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:" + +#: ../../source/tutorial-quickstart-xgboost.rst:977 +#, fuzzy +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a :code:`list`. After the simulation begins, " +"the clients won't need to pre-process their partitions again." +msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。" + +#: ../../source/tutorial-quickstart-xgboost.rst:980 +#, fuzzy +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "然后,我们定义策略和其他超参数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1031 +#, fuzzy +msgid "" +"After that, we start the simulation by calling " +":code:`fl.simulation.start_simulation`:" +msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1051 +#, fuzzy +msgid "" +"One of key parameters for :code:`start_simulation` is :code:`client_fn` " +"which returns a function to construct a client. We define it as follows:" +msgstr "" +":code:`start_simulation` 的一个关键参数是 " +":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1094 +msgid "Arguments parser" +msgstr "参数解析器" + +#: ../../source/tutorial-quickstart-xgboost.rst:1096 +#, fuzzy +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients, server " +"and simulation, allowing users to specify different experimental " +"settings. 
Let's first see the server side:"
+msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1142
+#, fuzzy
+msgid ""
+"This allows users to specify training strategies / the total number of "
+"clients / FL rounds / participating clients / clients for evaluation, "
+"and evaluation fashion. Note that with :code:`--centralised-eval`, the "
+"server will do centralised evaluation and all functionalities for client "
+"evaluation will be disabled."
+msgstr ""
+"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-"
+"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1146
+msgid "Then, the argument parser on the client side:"
+msgstr "然后是客户端的参数解析器:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1200
+#, fuzzy
+msgid ""
+"This defines various options for client data partitioning. Besides, "
+"clients also have an option to conduct evaluation on a centralised test "
+"set by setting :code:`--centralised-eval`, as well as an option to scale "
+"the learning rate based on the number of clients by setting "
+":code:`--scaled-lr`."
+msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1204
+#, fuzzy
+msgid "We also have an argument parser for simulation:"
+msgstr "我们还有一个用于模拟的参数解析器:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1282
+#, fuzzy
+msgid "This integrates all arguments for both client and server sides."
+msgstr "这整合了客户端和服务器端的所有参数。"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1285
+msgid "Example commands"
+msgstr "命令示例"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1287
+#, fuzzy
+msgid ""
+"To run a centrally evaluated experiment with the bagging strategy on 5 "
+"clients with exponential distribution for 50 rounds, we first start the "
+"server as below:"
+msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1294
+msgid "Then, on each client terminal, we start the clients:"
+msgstr "然后,我们在每个客户终端上启动客户机:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1300
+#, fuzzy
+msgid "To run the same experiment with Flower simulation:"
+msgstr "运行与 Flower 模拟相同的实验:"
+
+#: ../../source/tutorial-quickstart-xgboost.rst:1306
+#, fuzzy
+msgid ""
+"The full `code `_ for this comprehensive example can be found in"
+" :code:`examples/xgboost-comprehensive`."
+msgstr ""
+"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。"
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9
+msgid "Build a strategy from scratch"
+msgstr "从零开始制定策略"
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11
+#, fuzzy
+msgid ""
+"Welcome to the third part of the Flower federated learning tutorial. In "
+"previous parts of this tutorial, we introduced federated learning with "
+"PyTorch and the Flower framework (`part 1 "
+"`__) and we learned how strategies can be used to customize "
+"the execution on both the server and the clients (`part 2 "
+"`__)."
+msgstr ""
+"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 "
+"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 "
+"`__)。"
+
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13
+#, fuzzy
+msgid ""
+"In this notebook, we'll continue to customize the federated learning "
+"system we built previously by creating a custom version of FedAvg using "
+"the Flower framework, Flower Datasets, and PyTorch."
+msgstr "" +"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " +"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " +"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#, fuzzy +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "让我们从头开始构建一个新的``Strategy``!" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "准备工作" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "安装依赖项" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "首先,我们安装必要的软件包:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. 
If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "数据加载" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "模型培训/评估" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Flower 客户端" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#, fuzzy +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." 
+msgstr "" +"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " +"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " +"传递给客户端,并使用它记录其他详细信息:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "从零开始构建策略" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" +"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " +"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "回顾" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." +msgstr "" +"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " +"``Strategy`` " +"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +#, fuzzy +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" 
+ +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" +"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "自定义客户端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__),并从头开始构建了我们自己的定制策略(`part 3 " +"`__)。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" +"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " +"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" +" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +#, fuzzy +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "步骤 0:准备工作" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "步骤 1:重温 NumPyClient" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +#, fuzzy +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." +msgstr "" +"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " +"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " +"``client_fn`` 的函数来创建该类的实例:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +msgid "" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. 
Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 +#, fuzzy +msgid "" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" +msgstr "" +"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " +"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 +msgid "" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 +#, fuzzy +msgid "" +"This works as expected, ten clients are training for three rounds of " +"federated learning." +msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +#, fuzzy +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" +"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " +"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." +msgstr "" +"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " +"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " +"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " +"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " +"只是建立在``Client``之上的便捷抽象类。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" 
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545
+msgid ""
+"That's it, we're now using ``Client``. It probably looks similar to what "
+"we've done with ``NumPyClient``. So what's the difference?"
+msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547
+msgid ""
+"First of all, it's more code. But why? The difference comes from the fact"
+" that ``Client`` expects us to take care of parameter serialization and "
+"deserialization. For Flower to be able to send parameters over the "
+"network, it eventually needs to turn these parameters into ``bytes``. "
+"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called "
+"serialization. Turning raw bytes into something more useful (like NumPy "
+"``ndarray``'s) is called deserialization. Flower needs to do both: it "
+"needs to serialize parameters on the server-side and send them to the "
+"client, the client needs to deserialize them to use them for local "
+"training, and then serialize the updated parameters again to send them "
+"back to the server, which (finally!) deserializes them again in order to "
+"aggregate them with the updates received from other clients."
+msgstr ""
+"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower "
+"要想通过网络发送参数,最终需要将这些参数转化为 ``bytes``。把参数(例如 NumPy 的 ``ndarray`` "
+"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray``)称为反序列化。Flower "
+"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550
+msgid ""
+"The only *real* difference between Client and NumPyClient is that "
+"NumPyClient takes care of serialization and deserialization for you. It "
+"can do so because it expects you to return parameters as NumPy ndarray's,"
+" and it knows how to handle these. This makes working with machine "
+"learning libraries that have good NumPy support (most of them) a breeze."
+msgstr ""
+"Client 与 NumPyClient 之间唯一*真正*的区别在于,NumPyClient "
+"会为你处理序列化和反序列化。NumPyClient 之所以能做到这一点,是因为它预计你会以 NumPy "
+"ndarray 的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552
+msgid ""
+"In terms of API, there's one major difference: all methods in Client take"
+" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return "
+"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in "
+"``NumPyClient`` on the other hand have multiple arguments (e.g., "
+"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return"
+" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in "
+"``NumPyClient.fit``) if there are multiple things to handle. These "
+"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual "
+"values you're used to from ``NumPyClient``."
+msgstr ""
+"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 "
+"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 "
+"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在"
+" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565
+msgid "Step 3: Custom serialization"
+msgstr "步骤 3:自定义序列化"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567
+msgid ""
+"Here we will explore how to implement custom serialization with a simple "
+"example."
+msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569
+msgid ""
+"But first what is serialization? Serialization is just the process of "
+"converting an object into raw bytes, and equally as important, "
+"deserialization is the process of converting raw bytes back into an "
+"object. This is very useful for network communication. Indeed, without "
+"serialization, you could not just a Python object through the internet."
+msgstr ""
+"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个"
+" Python 对象。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571
+msgid ""
+"Federated Learning relies heavily on internet communication for training "
+"by sending Python objects back and forth between the clients and the "
+"server. This means that serialization is an essential part of Federated "
+"Learning."
+msgstr "通过在客户端和服务器之间来回发送 Python 对象,联邦学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573
+msgid ""
+"In the following section, we will write a basic example where instead of "
+"sending a serialized version of our ``ndarray``\\ s containing our "
+"parameters, we will first convert the ``ndarray`` into sparse matrices, "
+"before sending them. This technique can be used to save bandwidth, as in "
+"certain cases where the weights of a model are sparse (containing many 0 "
+"entries), converting them to a sparse matrix can greatly improve their "
+"bytesize."
+msgstr ""
+"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` "
+"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 "
+"条目),将它们转换成稀疏矩阵可以大大减小它们的字节数。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576
+msgid "Our custom serialization/deserialization functions"
+msgstr "我们的自定义序列化/反序列化函数"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578
+msgid ""
+"This is where the real serialization/deserialization will happen, "
+"especially in ``ndarray_to_sparse_bytes`` for serialization and "
+"``sparse_bytes_to_ndarray`` for deserialization."
+msgstr ""
+"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_to_sparse_bytes`` 和用于反序列化的 "
+"``sparse_bytes_to_ndarray`` 中。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580
+msgid ""
+"Note that we imported the ``scipy.sparse`` library in order to convert "
+"our arrays."
+msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668
+msgid "Client-side"
+msgstr "客户端"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670
+msgid ""
+"To be able to serialize our ``ndarray``\\ s into sparse parameters, we "
+"will just have to call our custom functions in our "
+"``flwr.client.Client``."
+msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." +msgstr "" +"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " +"序列化从网络中获取的参数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" +"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " +"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" +msgstr "服务器端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" +"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " +"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "正如你所看到的,``evaluate``中只修改了一行:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "然后将汇总结果序列化:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "现在我们可以运行自定义序列化示例!" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" +"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " +"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. 
There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "阅读Flower文档 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +#, fuzzy +msgid "`Check out Flower Code Examples `__" +msgstr "查看 Flower 代码示例 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "使用 \"Flower Baselines \"进行研究 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +#, fuzzy +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "观看 2023 年Flower峰会视频 `__" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "开始使用Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "欢迎阅读Flower联邦学习教程!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." +msgstr "" +"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " +"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 🌼" +msgstr "让我们开始吧!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." +msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "安装依赖项" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 +#, fuzzy +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +#, fuzzy +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
+msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +#, fuzzy +msgid "Load the data" +msgstr "加载数据" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +#, fuzzy +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" +"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " +"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +#, fuzzy +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" +"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " +"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +#, fuzzy +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +#, fuzzy +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" +"现在,让我们从 ``flwr-datasets`` 中创建 Federated Dataset 抽象,以分割 " +"CIFAR-10。我们将为每个边缘设备创建小型训练集和测试集,并将它们分别封装到 PyTorch ``DataLoader`` 中:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +#, fuzzy +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." 
+msgstr "" +"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " +"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " +"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +#, fuzzy +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +#, fuzzy +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." +msgstr "" +"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " +"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "步骤 1:使用 PyTorch 进行集中训练" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" +"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" +" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " +"minute blitz " +"`__。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#, fuzzy +msgid "Define the model" +msgstr "定义模型" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" +"我们使用` PyTorch 教程 " +"`__ 中描述的简单 CNN:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "让我们继续进行常规的训练和测试功能:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "训练模型" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +#, fuzzy +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#, fuzzy +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. 
The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" +"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " +"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "步骤 2:使用 Flower 联邦学习" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" +"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " +"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "更新模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#, fuzzy +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" +"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " +"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#, fuzzy +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" +"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " +"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Flower 客户端。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#, fuzzy +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." 
+msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#, fuzzy +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters``: 返回当前本地模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#, fuzzy +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#, fuzzy +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" +"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " +"客户端实现,它将一切都整合在一起:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#, fuzzy +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " +"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " +"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " +"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " +"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " +"``FlowerClient.evaluate``)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#, fuzzy +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. 
This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" +"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " +"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " +"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +#, fuzzy +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" +"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " +"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " +"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " +"``evaluate`` 时,它就会调用 " +"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " +"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +#, fuzzy +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" +"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " +"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " +"``flwr.simulation.start_simulation`` 启动实际模拟。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Flower 服务器。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +#, fuzzy +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). 
Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" +"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " +"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +#, fuzzy +msgid "Run the training" +msgstr "开始训练" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "幕后" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, fuzzy, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" +"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " +"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" +" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +#, fuzzy +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. 
It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" +"然后,Flower 会要求选定的 10 " +"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "准确度在哪里找?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" +"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " +"float(准确度)}``去哪儿了?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." +msgstr "" +"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 " +"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." +msgstr "" +"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" +" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" +"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " +"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." 
+msgstr "" +"其他两类指标(`losses_centralized`` 和 " +"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "结束语" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" +"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " +"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " +"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" +"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "使用联邦学习策略" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" +"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " +"`___)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" +"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " +"`PyTorch `__)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +#, fuzzy +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" +msgstr "让我们超越 FedAvg,采用Flower策略!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." 
+msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " +"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "策略定制" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "服务器端参数 **初始化**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#, fuzzy +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" +"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" +" 允许您直接将初始参数传递给策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#, fuzzy +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#, fuzzy +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "从定制战略开始" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#, fuzzy +msgid "" +"We've seen the function ``run_simulation`` before. 
It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" +"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " +"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "服务器端参数**评估**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). 
Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" +"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " +"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "运行模拟" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "向/从客户端发送/接收任意值" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" +"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " +"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " +"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " +"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " +"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" +"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " +"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#, fuzzy +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" +"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " +"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. 
We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" +"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " +"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" +" ``evaluate`` 中的第三个返回值。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "扩大联邦学习的规模" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, fuzzy, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" +"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " +"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " +"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " +"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" +"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " +"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " +"个客户端!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." 
+msgstr "" +"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "什么是联邦学习?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" +"在本教程中,你将了解什么是联邦学习,用 Flower " +"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" +" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" +msgstr "让我们开始吧!" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "经典机器学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "模型和数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|33cacb7d985c4906b348515c1a5cd993|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "使用数据训练模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." 
+msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." +msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|cc080a555947492fa66131dc3a967603|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "手机上的数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" +"值得一提的是,这个 \"其他地方 " +"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|085c3e0fb8664c6aa06246636524b20b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "数据存在于多种设备中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|bfe69c74e48c45d49b50251c38c2a019|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "集中数据收集" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "集中模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "经典机器学习面临的挑战" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|163117eb654a4273babba413cf8065f5|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "可集中管理" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. 
Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "无法集中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" +msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" +"**法规**: " +"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" +"**用户偏好**: " +"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." 
+msgstr "" +"**数据量**: " +"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "集中式机器学习不起作用的例子包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "用多家医院的敏感医疗记录训练癌症检测模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "不同组织的财务信息,以侦查财务欺诈行为" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "通过电动汽车的定位数据更好地预测续航里程" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "端到端加密信息可训练出更好的自动完成模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" +"像 `Brave `__浏览器或 `Signal " +"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "联邦学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "集中式机器学习:将数据转移到计算中心" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "联邦式(机器)学习:将计算转移到数据上" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." 
+msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "联邦学习的五个步骤" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "步骤 0:初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." +msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|f403fcd69e4e44409627e748b404c086|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|4b00fe63870145968f8443619a792a42|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "发送全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." 
+msgstr "" +"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" +"(mini-batches)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|368378731066486fa4397e89bc6b870c|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "根据本地数据进行训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "步骤 3:将模型参数更新返回服务器" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a66aa83d85bf4ffba7ed660b718066da|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "发送模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "步骤 4:将模型更新聚合到新的全局模型中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" +"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " +"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." 
+msgstr "" +"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " +"*Federated Averaging* (`McMahan等人,2016 " +"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " +"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" +" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" +" 100 个示例的 10 倍。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|82324b9af72a4582a81839d55caab767|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "聚合模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" +"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " +"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" +"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " +"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" +" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." 
+msgstr "" +"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" +" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "联邦分析" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" +"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " +"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." +msgstr "" +"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " +"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " +"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." 
+msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" +"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " +"构建一个简单的联邦学习系统。" + +#~ msgid "Before the release" +#~ msgstr "发布前" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" +#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " +#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" +#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " +#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" +#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " +#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" +#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" +#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " +#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "flwr(Python API 参考)" + +#~ msgid "..." +#~ msgstr "..." + +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "使用不安全的服务器连接启动客户端:" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "server.strategy.FedAvg" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "server.strategy.FedAvgM" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "可配置的 FedAvg 动量策略实施。" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "训练期间使用客户的比例。默认为 0.1。" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "server.strategy.FedMedian" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "server.strategy.FedOpt" + +#~ msgid "Configurable FedAdagrad strategy implementation." 
+#~ msgstr "可配置的 FedAdagrad 策略实施。" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "Federated Optim 策略界面。" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "server.strategy.FedProx" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "可配置的 FedProx 策略实施。" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "server.strategy.FedAdagrad" + +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "论文: https://arxiv.org/abs/2003.00295" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "server.strategy.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "server.strategy.FedYogi" + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "使用 Yogi 的自适应联合优化。" + +#~ msgid "Federated learning strategy using Yogi on server-side." +#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" + +#~ msgid "Paper: https://arxiv.org/abs/1803.01498" +#~ msgstr "论文:https://arxiv.org/abs/1803.01498" + +#~ msgid "server.strategy.Krum" +#~ msgstr "server.strategy.Krum" + +#~ msgid "Configurable Krum strategy implementation." +#~ msgstr "可配置的 Krum 策略实施。" + +#~ msgid "server.strategy.Bulyan" +#~ msgstr "server.strategy.Bulyan" + +#~ msgid "Bulyan strategy implementation." +#~ msgstr "Bulyan策略的实施。" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "server.strategy.FedXgbNnAvg" + +#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." +#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "server.strategy.DPFedAvgAdaptive" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" +#~ "**修复策略的错误返回类型** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" +#~ "两个方法(\"aggregate_fit \"和 " +#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ msgstr "" +#~ "** 更新 Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" +#~ "对于客户端就需要做这么多。我们仅需要实现 " +#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" +#~ " :code:`\"0.0.0.0:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "That's it for the client. 
We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" +#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " +#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " +#~ "即可。字符串 :code:`\"[::]:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" +#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " +#~ "``DataLoader`` 来包装由此产生的分割集:" + +#~ msgid "|e1dd4b4129b040bea23a894266227080|" +#~ msgstr "|e1dd4b4129b040bea23a894266227080|" + +#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" + +#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" + +#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" + +#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" + +#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" + +#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" + +#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" + +#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" + +#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" + +#~ msgid "|88002bbce1094ba1a83c9151df18f707|" +#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" + +#~ msgid "|391766aee87c482c834c93f7c22225e2|" +#~ msgstr "|391766aee87c482c834c93f7c22225e2|" + +#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgstr "|93b9a15bd27f4e91b40f642c253dfaac|" + +#~ msgid "|a23d9638f96342ef9d25209951e2d564|" +#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" + +#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.6.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" +#~ "将``!pip install -q 'flwr[simulation]' torch" +#~ " torchvision matplotlib``更改为``!pip install -q " +#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " +#~ "torch torchvision matplotlib``" + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. 
Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_numpy_client()` by pointing " +#~ "it at the same IP address we " +#~ "used in :code:`server.py`:" +#~ msgstr "" +#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" +#~ " :code:`cifar.py` 加载数据和模型。使用函数 " +#~ ":code:`fl.client.start_numpy_client()` 启动 " +#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " +#~ "IP 地址:" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_numpy_client" +#~ " `_)" +#~ " in the sense that they can be" +#~ " configure by creating a class " +#~ "inheriting, for example, from " +#~ "`flwr.client.NumPyClient `_ and therefore " +#~ "behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" +#~ msgstr "" +#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " +#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " +#~ "`_\" " +#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " +#~ "管理的客户端还包括:" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "实例: PyTorch 和 MNIST 的演练" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "准备...设置...训练!" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" +#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的 `Basic MNIST Example " +#~ "`_。您会发现用 " +#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" +#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" + +#~ msgid "Now, let's see what is really happening inside." 
+#~ msgstr "现在,让我们看看里面到底发生了什么。" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " +#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." +#~ msgstr "" +#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " +#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" +#~ " 数据集上有独立的数据。" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" +#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" +#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " +#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " +#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " +#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " +#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." 
+#~ msgstr "" +#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " +#~ ":code:`fl.client.start_client`。" + +#~ msgid "A Closer Look" +#~ msgstr "仔细看一下" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" +#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." +#~ msgstr "" +#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " +#~ "下找到,现复制如下。它与 `Basic MNIST Example " +#~ "`_中的网络相同。" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " +#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" +#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " +#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " +#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " +#~ ":code:`evaluate` " +#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" + +#~ msgid "Give It a Try" +#~ msgstr "试试看" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "通过上面的快速入门代码描述,你将对 Flower " +#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" +#~ " Flower 的经验:" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." 
+#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" + +#~ msgid "Differential privacy" +#~ msgstr "差别隐私" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" +#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " +#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " +#~ "框架中定义的训练模式中。" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "简化假设" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." 
+#~ msgstr "" +#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " +#~ ":math:`(\\epsilon,\\delta)` 。" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." +#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "可定制的噪声注入" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " +#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" + +#~ msgid "Wrapper-based approach" +#~ msgstr "基于封装的方法" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." 
+#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "在现有工作负载中引入 DP " +#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " +#~ ":code:`Strategy` 和 :code:`NumPyClient` " +#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " +#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" +#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" +#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " +#~ ":code:`__init__()` " +#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" +#~ " :code:`DPFedAvgFixed` 和 " +#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " +#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" +#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " +#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " +#~ "下键入)进行扩充。并且,如果 " +#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " +#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " +#~ "所返回的结果进行后处理。" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: " +#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" +#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " +#~ "1,强制以不加权的方式平均更新。此外,如果 " +#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" +#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " +#~ "之前,对参数进行*预*处理。" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " +#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" +#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " +#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." 
+#~ msgstr "" +#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " +#~ "返回的 config 字典,并在其中添加键-值对 " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" +#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." +#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " +#~ ":code:`fit()` " +#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" +#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" +#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " +#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" +#~ " 值,可以使用下面的脚本。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Flower" +#~ msgid "Flower driver SDK." +#~ msgstr "Flower 服务器。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." 
-msgstr "" -"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " -"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " -"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" +#~ msgid "driver" +#~ msgstr "服务器" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" -msgstr "" +#~ msgid "Get task results." +#~ msgstr "汇总训练结果。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" +#~ msgid "Request for run ID." +#~ msgstr "Flower 基线申请" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" +#~ msgid "Get client IDs." +#~ msgstr "返回客户端(本身)。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" +#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " +#~ "`_。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." -msgstr "" -"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " -"构建一个简单的联邦学习系统。" +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "快速入门 TensorFlow/Keras" -#~ msgid "Before the release" -#~ msgstr "发布前" +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "传统示例 (`flwr_example`)" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." #~ msgstr "" -#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " -#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" +#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " +#~ "`examples `_" +#~ " 中提供。" + +#~ msgid "Extra Dependencies" +#~ msgstr "额外依赖" #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." 
#~ msgstr "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " +#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" + +#~ msgid "For PyTorch examples::" +#~ msgstr "PyTorch 示例::" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "TensorFlow 示例::" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "PyTorch 和 TensorFlow 示例::" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." #~ msgstr "" -#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " -#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" +#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " +#~ ":code:`[tool.poems.extras]`)。" + +#~ msgid "PyTorch Examples" +#~ msgstr "PyTorch 示例" #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." -#~ msgstr "" -#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " -#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "CIFAR-10 图像分类" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." #~ msgstr "" -#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" -#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " -#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" +#~ "CIFAR-10 和 CIFAR-100 " +#~ "``_ 是流行的 RGB" +#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " +#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" -#~ msgid "flwr (Python API reference)" -#~ msgstr "flwr(Python API 参考)" +#~ msgid "First, start a Flower server:" +#~ msgstr "首先,启动 Flower 服务器:" -#~ msgid "..." -#~ msgstr "..." +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgid "Starting a client with an insecure server connection:" -#~ msgstr "使用不安全的服务器连接启动客户端:" +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "然后,在新的终端窗口中启动两个客户端:" -#~ msgid "server.strategy.FedAvg" -#~ msgstr "server.strategy.FedAvg" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgid "server.strategy.FedAvgM" -#~ msgstr "server.strategy.FedAvgM" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." 
+#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" -#~ msgid "Configurable FedAvg with Momentum strategy implementation." -#~ msgstr "可配置的 FedAvg 动量策略实施。" +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 图像分类" -#~ msgid "Fraction of clients used during training. Defaults to 0.1." -#~ msgstr "训练期间使用客户的比例。默认为 0.1。" +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" +#~ "ImageNet-2012 `_ " +#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " +#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" -#~ msgid "Fraction of clients used during validation. Defaults to 0.1." -#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgid "server.strategy.FedMedian" -#~ msgstr "server.strategy.FedMedian" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgid "server.strategy.QFedAvg" -#~ msgstr "server.strategy.QFedAvg" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" + +#~ msgid "TensorFlow Examples" +#~ msgstr "TensorFlow 示例" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Fashion-MNIST 图像分类" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" +#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" +#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgid "server.strategy.FedOpt" -#~ msgstr "server.strategy.FedOpt" +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" -#~ msgid "Configurable FedAdagrad strategy implementation." -#~ msgstr "可配置的 FedAdagrad 策略实施。" +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "基本图像标签" -#~ msgid "Federated Optim strategy interface." -#~ msgstr "Federated Optim 策略界面。" +#~ msgid "The image tag of the base image." +#~ msgstr "基础图像的图像标记。" -#~ msgid "server.strategy.FedProx" -#~ msgstr "server.strategy.FedProx" +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." 
+#~ msgstr "" +#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " +#~ ":ref:`changelogentry` 附录中的一些示例和细节。" -#~ msgid "Configurable FedProx strategy implementation." -#~ msgstr "可配置的 FedProx 策略实施。" +#~ msgid "Open a PR (as shown above)" +#~ msgstr "打开 PR(如上图所示)" -#~ msgid "server.strategy.FedAdagrad" -#~ msgstr "server.strategy.FedAdagrad" +#~ msgid "How to write a good PR title" +#~ msgstr "如何撰写好的公关标题" -#~ msgid "Paper: https://arxiv.org/abs/2003.00295" -#~ msgstr "论文: https://arxiv.org/abs/2003.00295" +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" -#~ msgid "Federated learning strategy using Adagrad on server-side." -#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." +#~ msgstr "" +#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " +#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " +#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小写和标点符号:" +#~ " 遵守语法规则,以确保清晰。" -#~ msgid "server.strategy.FedAdam" -#~ msgstr "server.strategy.FedAdam" +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" -#~ msgid "server.strategy.FedYogi" -#~ msgstr "server.strategy.FedYogi" +#~ msgid "Implement Algorithm" +#~ msgstr "执行算法" -#~ msgid "Adaptive Federated Optimization using Yogi." -#~ msgstr "使用 Yogi 的自适应联合优化。" +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "在代码库中添加 my_new_file.py" -#~ msgid "Federated learning strategy using Yogi on server-side." -#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" +#~ msgid "Improve code in module" +#~ msgstr "改进模块中的代码" -#~ msgid "Paper: https://arxiv.org/abs/1803.01498" -#~ msgstr "论文:https://arxiv.org/abs/1803.01498" +#~ msgid "Change SomeModule" +#~ msgstr "更改 SomeModule" -#~ msgid "server.strategy.Krum" -#~ msgstr "server.strategy.Krum" +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" -#~ msgid "Configurable Krum strategy implementation." -#~ msgstr "可配置的 Krum 策略实施。" +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" -#~ msgid "server.strategy.Bulyan" -#~ msgstr "server.strategy.Bulyan" +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "移除不必要的 XGBoost 依赖性" -#~ msgid "Bulyan strategy implementation." 
-#~ msgstr "Bulyan策略的实施。" +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "删除 FedAvg 子类化策略中的多余属性" -#~ msgid "server.strategy.FedXgbNnAvg" -#~ msgstr "server.strategy.FedXgbNnAvg" +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" -#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." -#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "添加新的惊人库,用于改进模拟引擎" -#~ msgid "server.strategy.DPFedAvgAdaptive" -#~ msgstr "server.strategy.DPFedAvgAdaptive" +#~ msgid "Changelog entry" +#~ msgstr "更新日志" #~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" -#~ msgstr "" -#~ "**修复策略的错误返回类型** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." +#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" #~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." -#~ msgstr "" -#~ "两个方法(\"aggregate_fit \"和 " -#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" +#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" -#~ msgstr "" -#~ "** 更新 Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." -#~ msgstr "" -#~ "对于客户端就需要做这么多。我们仅需要实现 " -#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" -#~ " :code:`\"0.0.0.0:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" -#~ " server_address 。" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" #~ msgid "" -#~ "That's it for the client. 
We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." -#~ msgstr "" -#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " -#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " -#~ "即可。字符串 :code:`\"[::]:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" -#~ " server_address 。" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." +#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" -#~ msgstr "" -#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " -#~ "``DataLoader`` 来包装由此产生的分割集:" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" +#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" -#~ msgid "|e1dd4b4129b040bea23a894266227080|" -#~ msgstr "|e1dd4b4129b040bea23a894266227080|" +#~ msgid " is for classifying a PR as a general improvement." +#~ msgstr " 用于将 PR 划分为一般改进。" -#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" -#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgid " is to not add the PR to the changelog" +#~ msgstr "表示不将 PR 添加到更新日志中" -#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" -#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgid " is to add a general baselines change to the PR" +#~ msgstr " 是指在 PR 中添加一般基线更改" -#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" -#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgid " is to add a general examples change to the PR" +#~ msgstr " 是在 PR 中添加对一般示例的修改" + +#~ msgid " is to add a general sdk change to the PR" +#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" + +#~ msgid " is to add a general simulations change to the PR" +#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" + +#~ msgid "Note that only one token should be used." +#~ msgstr "请注意,只能使用一个标记。" -#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" -#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgid "" +#~ "Its content must have a specific " +#~ "format. 
We will break down what " +#~ "each possibility does:" +#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" -#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" -#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" -#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" -#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" -#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" -#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." +#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" -#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" -#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" -#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" -#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" -#~ msgid "|88002bbce1094ba1a83c9151df18f707|" -#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" -#~ msgid "|391766aee87c482c834c93f7c22225e2|" -#~ msgstr "|391766aee87c482c834c93f7c22225e2|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" -#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" -#~ msgstr "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" -#~ msgid "|a23d9638f96342ef9d25209951e2d564|" -#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." +#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" -#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" -#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "示例: MXNet - 运行联邦式 MXNet" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.6.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. 
We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" -#~ "将``!pip install -q 'flwr[simulation]' torch" -#~ " torchvision matplotlib``更改为``!pip install -q " -#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " -#~ "torch torchvision matplotlib``" +#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" +#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " +#~ "`PyTorch - 从集中式到联邦式 " +#~ "`_ 教程类似的示例结构。MXNet" +#~ " 和 PyTorch 非常相似,参考 `此处 " +#~ "`_对 MXNet " +#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " +#~ "`" +#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" #~ msgid "" -#~ "All that's left to do it to " -#~ "define a function that loads both " -#~ "model and data, creates a " -#~ ":code:`CifarClient`, and starts this client." -#~ " You load your data and model " -#~ "by using :code:`cifar.py`. Start " -#~ ":code:`CifarClient` with the function " -#~ ":code:`fl.client.start_numpy_client()` by pointing " -#~ "it at the same IP address we " -#~ "used in :code:`server.py`:" -#~ msgstr "" -#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" -#~ " :code:`cifar.py` 加载数据和模型。使用函数 " -#~ ":code:`fl.client.start_numpy_client()` 启动 " -#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " -#~ "IP 地址:" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" + +#~ msgid "MNIST Training with MXNet" +#~ msgstr "使用 MXNet 进行 MNIST 训练" #~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. the ones you launch" -#~ " via the command `flwr.client.start_numpy_client" -#~ " `_)" -#~ " in the sense that they can be" -#~ " configure by creating a class " -#~ "inheriting, for example, from " -#~ "`flwr.client.NumPyClient `_ and therefore " -#~ "behave in an identical way. In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." #~ msgstr "" -#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " -#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " -#~ "`_\" " -#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " -#~ "管理的客户端还包括:" +#~ "首先,我们将简要介绍基于 :code:`Sequential` " +#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " +#~ "`_。" -#~ msgid "Example: Walk-Through PyTorch & MNIST" -#~ msgstr "实例: PyTorch 和 MNIST 的演练" +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. 
You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." +#~ msgstr "" +#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " +#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " +#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." +#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" -#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." +#~ msgstr "" +#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " +#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" -#~ msgid "Ready... Set... Train!" -#~ msgstr "准备...设置...训练!" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." +#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." -#~ msgstr "" -#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " -#~ "PyTorch 的 `Basic MNIST Example " -#~ "`_。您会发现用 " -#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" -#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" #~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" -#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." +#~ msgstr "" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " +#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." -#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" #~ msgid "" -#~ "Et voilà! 
You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." -#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." +#~ msgstr "" +#~ "到目前为止,如果你以前使用过 MXNet(甚至 " +#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#~ msgid "Now, let's see what is really happening inside." -#~ msgstr "现在,让我们看看里面到底发生了什么。" +#~ msgid "MXNet meets Flower" +#~ msgstr "MXNet 结合 Flower" #~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" -#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." +#~ msgstr "" +#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " +#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " +#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" #~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." #~ msgstr "" -#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " -#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" +#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" #~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." -#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. 
Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" +#~ msgstr "" +#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " +#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " +#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " +#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." -#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" +#~ msgstr "" +#~ "实现 Flower *client*基本上意味着要实现 " +#~ ":code:`flwr.client.Client` 或 " +#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " +#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" +#~ " 或 MXNet),:code:`NumPyClient` 比 " +#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " +#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#~ msgid "**sever_address**: String that identifies IP and port of the server." -#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." #~ msgstr "" -#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " -#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" -#~ " 数据集上有独立的数据。" +#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " +#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" #~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." 
+#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" -#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" -#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " -#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " -#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " -#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " -#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" +#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " +#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " +#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" +#~ " :code:`NumPyClient` 子类告知 Flower " +#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" #~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." #~ msgstr "" -#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " -#~ ":code:`fl.client.start_client`。" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " +#~ ":code:`Sequential` 模型。" -#~ msgid "A Closer Look" -#~ msgstr "仔细看一下" +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" #~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" #~ msgstr "" -#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" -#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" +#~ "此示例的完整源代码在:\"MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" +#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" -#~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" -#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" +#~ msgid "with the following command sequence:" +#~ msgstr "使用以下命令序列:" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." 
-#~ msgstr "" -#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " -#~ "下找到,现复制如下。它与 `Basic MNIST Example " -#~ "`_中的网络相同。" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." +#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" #~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" -#~ msgstr "" -#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " -#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." +#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" #~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." #~ msgstr "" -#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " -#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " -#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" +#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " +#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" -#~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" -#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" +#~ msgid "Flower server" +#~ msgstr "Flower 服务器" + +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" + +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " -#~ ":code:`evaluate` " -#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" -#~ msgid "Give It a Try" -#~ msgstr "试试看" +#~ msgid "Start a Flower Driver API server." +#~ msgstr "启动基于 Ray 的Flower模拟服务器。" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. 
Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ "通过上面的快速入门代码描述,你将对 Flower " -#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" -#~ " Flower 的经验:" +#~ "Flower 1.0: ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" + +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" #~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." -#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" + +#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgstr "server.strategy.Strategy" + +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "如果已连接,请断开与超级链接的连接。" + +#~ msgid "start\\_driver" +#~ msgstr "启动客户端" #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" -#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." +#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" -#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" -#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_server` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" +#~ msgid "The Driver object to use." +#~ msgstr "要使用的驱动程序对象。" -#~ msgid "Differential privacy" -#~ msgstr "差别隐私" +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "启动不安全的服务器:" -#~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." -#~ msgstr "" -#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " -#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " -#~ "框架中定义的训练模式中。" +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "启动支持 SSL 的服务器:" #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." 
-#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" -#~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." -#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" +#~ msgid "Run Simulation Engine from the CLI." +#~ msgstr "" -#~ msgid "DP-FedAvg" -#~ msgstr "DP-FedAvg" +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "运行模拟" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." -#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." +#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" -#~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." -#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" +#~ msgid "Quickstart MXNet" +#~ msgstr "快速入门 MXNet" #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." -#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." +#~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." -#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" -#~ msgid "Simplifying Assumptions" -#~ msgstr "简化假设" +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." 
#~ msgstr "" -#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " -#~ ":math:`(\\epsilon,\\delta)` 。" - -#~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." -#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" +#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " +#~ "MXNet 的 `手写数字识别教程 " +#~ "`_\"。" #~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." -#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" -#~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." -#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." -#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." +#~ msgstr "" +#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" +#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." -#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" - -#~ msgid "Customizable Responsibility for Noise injection" -#~ msgstr "可定制的噪声注入" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." -#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." 
+#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" #~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." -#~ msgstr "" -#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " -#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" -#~ msgid "Wrapper-based approach" -#~ msgstr "基于封装的方法" +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." #~ msgstr "" -#~ "在现有工作负载中引入 DP " -#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " -#~ ":code:`Strategy` 和 :code:`NumPyClient` " -#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " -#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" -#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" +#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " +#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " +#~ "客户端。" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." 
+#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " -#~ ":code:`__init__()` " -#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" -#~ " :code:`DPFedAvgFixed` 和 " -#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " -#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " +#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " +#~ "是可选的):" -#~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." -#~ msgstr "" -#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" -#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" +#~ msgid "They can be implemented in the following way:" +#~ msgstr "它们可以通过以下方式实现:" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." -#~ msgstr "" -#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " -#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " -#~ "下键入)进行扩充。并且,如果 " -#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " -#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " -#~ "所返回的结果进行后处理。" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ "That's it for the client. 
We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." #~ msgstr "" -#~ ":code:`aggregate_fit()`: " -#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" -#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " -#~ "1,强制以不加权的方式平均更新。此外,如果 " -#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" -#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " -#~ "之前,对参数进行*预*处理。" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" +#~ " " +#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +#~ " :code:`server_address`。" #~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." -#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." #~ msgstr "" -#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " -#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可在 :code:`examples/quickstart-" +#~ "mxnet` 中找到。" -#~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." 
-#~ msgstr "" -#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " -#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" +#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" + +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "使用 OpenML 加载 MNIST 数据集" + +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" + +#~ msgid "Shuffles data and its label" +#~ msgstr "对数据及其标签进行洗牌" + +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "将数据集分割成多个分区" #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" -#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " -#~ "返回的 config 字典,并在其中添加键-值对 " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" -#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" +#~ "我们从 `OpenML `_ 中加载 " +#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " +#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " +#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." -#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" +#~ msgid "Let's get stated!" +#~ msgstr "让我们开始吧!" -#~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" #~ msgstr "" -#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " -#~ ":code:`fit()` " -#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" -#~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." -#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgstr "" -#~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." 
+#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" #~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" -#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgstr "" -#~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" #~ msgstr "" -#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " -#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" -#~ " 值,可以使用下面的脚本。" -#~ msgid "Flower driver SDK." -#~ msgstr "Flower 服务器。" +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" -#~ msgid "driver" -#~ msgstr "服务器" +#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgstr "" -#~ msgid "Get task results." -#~ msgstr "汇总训练结果。" +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgstr "" -#~ msgid "Request for run ID." -#~ msgstr "Flower 基线申请" +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgstr "" -#~ msgid "Get client IDs." -#~ msgstr "返回客户端(本身)。" +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgstr "" -#~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" #~ msgstr "" -#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" -#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " -#~ "`_。" -#~ msgid "Quickstart TensorFlow/Keras" -#~ msgstr "快速入门 TensorFlow/Keras" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" -#~ msgid "Legacy Examples (`flwr_example`)" -#~ msgstr "传统示例 (`flwr_example`)" +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgstr "" + +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." #~ msgstr "" -#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " -#~ "`examples `_" -#~ " 中提供。" +#~ "目前,Flower " +#~ "提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" +#~ " 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装" +#~ " Flower 服务器。" -#~ msgid "Extra Dependencies" -#~ msgstr "额外依赖" +#~ msgid "``3.11``" +#~ msgstr "``1.0.0rc1``" + +#~ msgid "Defaults to ``22.04``." +#~ msgstr "默认为 ``22.04``。" + +#~ msgid "Building the SuperLink image" +#~ msgstr "启动服务器" + +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "默认为 ``flwr/server``。" + +#~ msgid "The Python version of the base image." +#~ msgstr "基础镜像的存储库名称。" + +#~ msgid "Defaults to ``py3.11``." 
+#~ msgstr "默认为 ``22.04``。" + +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "默认为 ``py3.11-ubuntu22.04``。" + +#~ msgid "Defaults to ``flwr``." +#~ msgstr "默认为 ``flwr/server``。" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." -#~ msgstr "" -#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " -#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" - -#~ msgid "For PyTorch examples::" -#~ msgstr "PyTorch 示例::" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" -#~ msgid "For TensorFlow examples::" -#~ msgstr "TensorFlow 示例::" +#~ msgid "Creating New Messages" +#~ msgstr "创建新信息" -#~ msgid "For both PyTorch and TensorFlow examples::" -#~ msgstr "PyTorch 和 TensorFlow 示例::" +#~ msgid "" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." -#~ msgstr "" -#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " -#~ ":code:`[tool.poems.extras]`)。" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." +#~ msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." -#~ msgid "PyTorch Examples" -#~ msgstr "PyTorch 示例" +#~ msgid "Server's side:" +#~ msgstr "在服务器端:" + +#~ msgid "Client's side:" +#~ msgstr "在客户端:" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." -#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" -#~ msgid "CIFAR-10 Image Classification" -#~ msgstr "CIFAR-10 图像分类" +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "协议缓冲区的信息类型" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." 
#~ msgstr "" -#~ "CIFAR-10 和 CIFAR-100 " -#~ "``_ 是流行的 RGB" -#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " -#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" +#~ "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " +#~ "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档" +#~ " `_。" -#~ msgid "First, start a Flower server:" -#~ msgstr "首先,启动 Flower 服务器:" +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "在 :code:`ServerMessage` 代码块中:" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "Within the ClientMessage block:" +#~ msgstr "在 ClientMessage 代码块中:" -#~ msgid "Then, start the two clients in a new terminal window:" -#~ msgstr "然后,在新的终端窗口中启动两个客户端:" +#~ msgid "" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." +#~ msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "完成后,我们将使用:" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "如果编译成功,你应该会看到以下信息:" -#~ msgid "ImageNet-2012 Image Classification" -#~ msgstr "ImageNet-2012 图像分类" +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "序列化和反序列化函数" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." #~ msgstr "" -#~ "ImageNet-2012 `_ " -#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " -#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" - -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" - -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ "下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC " +#~ "消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在" +#~ " :code:`serde.py` 中添加这些函数。" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" +#~ msgid "The four functions:" +#~ msgstr "四种函数:" -#~ msgid "TensorFlow Examples" -#~ msgstr "TensorFlow 示例" +#~ msgid "Sending the Message from the Server" +#~ msgstr "从服务器发送信息" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." 
-#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" -#~ msgid "Fashion-MNIST Image Classification" -#~ msgstr "Fashion-MNIST 图像分类" +#~ msgid "Receiving the Message by the Client" +#~ msgstr "由客户端接收信息" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" #~ msgstr "" -#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" -#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" +#~ "最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用" +#~ " :code:`example_response` 函数。记住使用 serde 函数!" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "Within the handle function:" +#~ msgstr "在句柄函数内:" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "And add a new function:" +#~ msgstr "并增加一个新函数:" -#~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" +#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgstr "希望您在运行程序时能得到预期的结果!" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "基本图像标签" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#~ msgid "The image tag of the base image." -#~ msgstr "基础图像的图像标记。" +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" -#~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." -#~ msgstr "" -#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " -#~ ":ref:`changelogentry` 附录中的一些示例和细节。" +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" -#~ msgid "Open a PR (as shown above)" -#~ msgstr "打开 PR(如上图所示)" +#~ msgid "Run Flower server (Fleet API)." +#~ msgstr "Flower 服务器。" -#~ msgid "How to write a good PR title" -#~ msgstr "如何撰写好的公关标题" +#~ msgid "Unreleased" +#~ msgstr "尚未发布" -#~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. Here's a guide to help " -#~ "you write a good GitHub PR title:" -#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "|d8bf04f23d9b46d8a23cc6f4887d7873|" -#~ msgid "" -#~ "1. 
Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." -#~ msgstr "" -#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " -#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " -#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小写和标点符号:" -#~ " 遵守语法规则,以确保清晰。" +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "|5aa1711387d74d0f8b9c499e1a51627e|" -#~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" -#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "|2bc8e069228d4873804061ff4a95048c|" -#~ msgid "Implement Algorithm" -#~ msgstr "执行算法" +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "|c258488766324dc9a6807f0e7c4fd5f4|" -#~ msgid "Add my_new_file.py to codebase" -#~ msgstr "在代码库中添加 my_new_file.py" +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "|d5f962c3f4ec48529efda980868c14b0|" -#~ msgid "Improve code in module" -#~ msgstr "改进模块中的代码" +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "|a5eccea18d4c43a68b54b65043cabef8|" -#~ msgid "Change SomeModule" -#~ msgstr "更改 SomeModule" +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "|f17662f7df2d42f68cac70a1fdeda8a7|" -#~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" -#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "|241fc906441a4f038c625a19d30d01b2|" -#~ msgid "Update docs banner to mention Flower Summit 2023" -#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "|0aa5aa05810b44b6a835cecce28f3137|" -#~ msgid "Remove unnecessary XGBoost dependency" -#~ msgstr "移除不必要的 XGBoost 依赖性" +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "|c742940dd4bf4de09d8d0d5e8d179638|" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" -#~ msgstr "删除 FedAvg 子类化策略中的多余属性" +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "|1f169ab4601a47e1a226f1628f4ebddb|" -#~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" -#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "|12cfa9cde14440ecb8c8f6c1d7185bec|" -#~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" -#~ msgstr "添加新的惊人库,用于改进模拟引擎" +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "|72939caf6e294b0986fee6dde96614d7|" -#~ msgid "Changelog entry" -#~ msgstr "更新日志" +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "|83a8daee45da4a98b8d6f24ae098fc50|" -#~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." 
-#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" +#~ msgid "Edge Client Engine" +#~ msgstr "边缘客户端引擎" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" -#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "具有边缘客户端引擎的`Flower `核心架构" -#~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." -#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" +#~ msgid "Virtual Client Engine" +#~ msgstr "虚拟客户端引擎" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." -#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "具有虚拟客户端引擎的`Flower `核心架构" -#~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." -#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" -#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" -#~ msgid " is for classifying a PR as a general improvement." -#~ msgstr " 用于将 PR 划分为一般改进。" +#~ msgid "Clone the flower repository." +#~ msgstr "**叉花仓库**" -#~ msgid " is to not add the PR to the changelog" -#~ msgstr "表示不将 PR 添加到更新日志中" +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" +#~ "请阅读 :doc:`Run Flower using Docker " +#~ "` " +#~ "的第一节,其中更详细地介绍了这一步骤。" -#~ msgid " is to add a general baselines change to the PR" -#~ msgstr " 是指在 PR 中添加一般基线更改" +#~ msgid "``22.04``" +#~ msgstr "``1.0.0rc1``" -#~ msgid " is to add a general examples change to the PR" -#~ msgstr " 是在 PR 中添加对一般示例的修改" +#~ msgid "``23.0.1``" +#~ msgstr "``1.0.0rc1``" -#~ msgid " is to add a general sdk change to the PR" -#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" -#~ msgid " is to add a general simulations change to the PR" -#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" -#~ msgid "Note that only one token should be used." -#~ msgstr "请注意,只能使用一个标记。" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "启动服务器" -#~ msgid "" -#~ "Its content must have a specific " -#~ "format. 
We will break down what " -#~ "each possibility does:" -#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "官方 Ubuntu Docker 映像的版本。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." -#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." +#~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ msgid "**Via the UI**" +#~ msgstr "**审查 PR**" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "" + +#~ msgid "**Via the GitHub CI**" +#~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." -#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." 
+#~ msgstr "" -#~ msgid "Example: MXNet - Run MXNet Federated" -#~ msgstr "示例: MXNet - 运行联邦式 MXNet" +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "示例: JAX - 运行联邦式 JAX" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." #~ msgstr "" -#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" -#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " -#~ "`PyTorch - 从集中式到联邦式 " -#~ "`_ 教程类似的示例结构。MXNet" -#~ " 和 PyTorch 非常相似,参考 `此处 " -#~ "`_对 MXNet " -#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " -#~ "`" -#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" +#~ "开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 " +#~ "`Docker Hub `_" +#~ " 上找到这些镜像。" #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" -#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" - -#~ msgid "MNIST Training with MXNet" -#~ msgstr "使用 MXNet 进行 MNIST 训练" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" +#~ "如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在" +#~ " `_ 找到安装说明。" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." #~ msgstr "" -#~ "首先,我们将简要介绍基于 :code:`Sequential` " -#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " -#~ "`_。" +#~ "在 Linux 上,Docker 命令需要 ``sudo`` " +#~ "权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 `安装后步骤" +#~ " `_进行操作。" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." 
#~ msgstr "" -#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " -#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " -#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" +#~ "为确保最佳性能和兼容性,SuperLink、SuperNode 和 ServerApp " +#~ "映像在一起运行时必须具有相同的版本。这可确保无缝集成,并避免因使用不同版本而可能产生的潜在冲突或问题。" + +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" + +#~ msgid "Quickstart" +#~ msgstr "快速入门 JAX" + +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "如果您想试用 Flower,可以使用以下命令:" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." -#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" +#~ "该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的" +#~ " Docker 镜像。标签包含使用 Flower、Python 和 Ubuntu" +#~ " 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和" +#~ " Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" -#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " -#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" +#~ "``-p :`` 标记会告诉 Docker 将主机的端口" +#~ " ``9091``/``9092`` 映射到容器的端口 ``9091``/`9092``,这样你就可以在" +#~ " ``http://localhost:9091`` 上访问 Driver API,在 " +#~ "``http://localhost:9092`` 上访问 Fleet " +#~ "API。最后,标签后面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 " +#~ "``--insecure`` 。" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." -#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." +#~ msgstr "" +#~ "不安全 \"标志启用不安全通信(使用 HTTP,而非 " +#~ "HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " +#~ "`_。" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." -#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" + +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "在主机系统上挂载卷以存储状态" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. 
Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " -#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" -#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." #~ msgstr "" -#~ "到目前为止,如果你以前使用过 MXNet(甚至 " -#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#~ msgid "MXNet meets Flower" -#~ msgstr "MXNet 结合 Flower" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "超级节点 Docker 镜像预装了 Flower 版本,可作为构建自己的超级节点镜像的基础。" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." 
+#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." #~ msgstr "" -#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " -#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " -#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" +#~ "我们将使用 \"quickstart-pytorch\"(快速启动-pytorch)示例来说明如何对 " +#~ "ClientApp 进行 docker 化。" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" + +#~ msgid "Let's assume the following project layout:" +#~ msgstr "假设项目布局如下" + +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." #~ msgstr "" -#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" +#~ "首先,我们需要在 ``ClientApp`` 代码所在的目录中创建一个 " +#~ "``requirements.txt`` 文件。在该文件中,我们列出了 ClientApp " +#~ "需要的所有依赖项。" #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." #~ msgstr "" -#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " -#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " -#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " -#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" +#~ "请注意,`flwr `__ " +#~ "已经安装在`flwr/supernode``基础镜像中,因此只需在`requirements.txt``中包含其他依赖包,如`torch``、`tensorflow`等。" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. 
:code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." #~ msgstr "" -#~ "实现 Flower *client*基本上意味着要实现 " -#~ ":code:`flwr.client.Client` 或 " -#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " -#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" -#~ " 或 MXNet),:code:`NumPyClient` 比 " -#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " -#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" +#~ "接下来,我们创建一个 Dockerfile。如果使用 ``quickstart-pytorch``" +#~ " 示例,请在 ``examples/quickstart-pytorch`` 中创建一个名为" +#~ " ``Dockerfile.supernode`` 的新文件。" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" -#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "Dockerfile.supernode \"包含组装超级节点映像的指令。" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." #~ msgstr "" -#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " -#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" +#~ "在前两行中,我们指示 Docker 使用标记为 ``nightly`` 的 " +#~ "SuperNode 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。接下来,我们通过将 ``requirements.txt`` " +#~ "文件复制到映像中并运行 ``pip install`` 来安装 ClientApp " +#~ "依赖项。最后两行,我们将 ``client.py`` 模块复制到映像中,并将入口点设置为 " +#~ "``flower-client-app``,参数为 ``client:app``。参数是将在 " +#~ "ClientApp 内运行的 ClientApp 的对象引用(``<模块>:<属性>``)。" + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "启动服务器" #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." 
#~ msgstr "" -#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " -#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " -#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" -#~ " :code:`NumPyClient` 子类告知 Flower " -#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" +#~ "我们将图像命名为 ``flwr_supernode``,标签为 " +#~ "``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" + +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "启动服务器" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "现在,我们已经构建了超级节点镜像,终于可以运行它了。" + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "让我们来分析一下这条命令的各个部分:" #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "`-rm``: 该选项指定容器停止时应自动移除。" + +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "不安全\": 该选项启用不安全通信。" + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--server 192.168.1.100:9092``: 该选项指定超级链接舰队的地址" + +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "要连接的 API。记住用您的超级链接 IP 更新它。" + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " -#~ ":code:`Sequential` 模型。" +#~ "要测试在本地运行 Flower,可以创建一个 \"桥接网络 " +#~ "`__\",使用\"--网络 \"参数并传递 Docker " +#~ "网络的名称,以运行超级节点。" #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" -#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower 超级节点二进制文件。要查看超级节点支持的所有可用标记,请运行" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "要启用 SSL,我们需要将 PEM 编码的根证书挂载到 SuperNode 容器中。" + +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." #~ msgstr "" -#~ "此示例的完整源代码在:\"MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" -#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" 
+#~ "与 SuperNode 映像类似,ServerApp Docker 映像也预装了 " +#~ "Flower 版本,可作为构建自己的 ServerApp 映像的基础。" -#~ msgid "with the following command sequence:" -#~ msgstr "使用以下命令序列:" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" +#~ "我们将使用与 \"Flower SuperNode \"部分相同的 " +#~ "\"quickstart-pytorch \"示例。如果您还没有这样做,请在继续之前遵循 " +#~ "\"SuperNode 先决条件\"。" + +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "创建 ServerApp Dockerfile" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." -#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "首先,我们需要在 ``ServerApp`` 代码所在的目录中创建一个 Dockerfile。如果使用" +#~ " ``quickstart-pytorch`` 示例,请在 ``examples" +#~ "/quickstart-pytorch`` 中创建一个名为 ``Dockerfile.serverapp``" +#~ " 的新文件。" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." -#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "Dockerfile.serverapp \"包含组装 ServerApp 镜像的说明。" #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." #~ msgstr "" -#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " -#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" +#~ "在前两行中,我们指示 Docker 使用标记为 ``1.8.0`` 的 " +#~ "ServerApp 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。在最后两行中,我们将 ``server.py`` " +#~ "模块复制到映像中,并将入口点设置为 ``flower-server-app``,参数为 " +#~ "``server:app``。参数是将在 ServerApp 容器内运行的 ServerApp " +#~ "的对象引用(``<模块>:<属性>``)。" + +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "启动服务器" -#~ msgid "Flower server" -#~ msgstr "Flower 服务器" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "启动服务器" -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." 
+#~ msgstr "现在我们已经构建了 ServerApp 镜像,终于可以运行它了。" -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "``--server 192.168.1.100:9091``: 此选项指定超级链接驱动程序的地址" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." #~ msgstr "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "要测试在本地运行 Flower,可以创建一个 ``bridge network " +#~ "`___,使用 ``--network`` 参数并传递 Docker " +#~ "网络的名称,以运行 ServerApps。" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "启动基于 Ray 的Flower模拟服务器。" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower ServerApp 二进制文件。要查看 ServerApp 支持的所有可用标记,请运行" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." #~ msgstr "" -#~ "Flower 1.0: ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" -#~ msgid "`Driver` class provides an interface to the Driver API." -#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" +#~ msgid "Run with root user privileges" +#~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." -#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." +#~ msgstr "" -#~ msgid ":py:obj:`close `\\ \\(\\)" -#~ msgstr "server.strategy.Strategy" +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." -#~ msgstr "如果已连接,请断开与超级链接的连接。" +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" -#~ msgid "start\\_driver" -#~ msgstr "启动客户端" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" + +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" + +#~ msgid "d defaults to None." +#~ msgstr "d 默认为 \"无\"。" + +#~ msgid "Update R from dict/iterable E and F." 
+#~ msgstr "根据二进制/可迭代 E 和 F 更新 R。" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." -#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" + +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" + +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" + +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." -#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" -#~ "抽象基类 `flwr.server.ClientManager` " -#~ "的实现。如果没有提供实现,`start_server` 将使用 " -#~ "`flwr.server.client_manager.SimpleClientManager`。" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" -#~ msgid "The Driver object to use." -#~ msgstr "要使用的驱动程序对象。" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" -#~ msgid "Starting a driver that connects to an insecure server:" -#~ msgstr "启动不安全的服务器:" +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" -#~ msgstr "启动支持 SSL 的服务器:" +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" + +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" + +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" + +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" + +#~ msgid "run\\_fleet\\_api" +#~ msgstr "run\\_fleet\\_api" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ " +#~ "配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "key shares." +#~ msgstr "关键股份。" + +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. 
- 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg " +#~ "配置并收集它们的公钥。- 共享密钥\": 在客户端之间广播公钥并收集加密密钥。" -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "运行模拟" +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" +#~ "字典,例如 {\"\": , \"\": " +#~ "} 来配置后端。 中支持的值是 " +#~ "`flwr.common.typing.ConfigsRecordValues`中包含的值。" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." -#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" -#~ msgid "Quickstart MXNet" -#~ msgstr "快速入门 MXNet" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." -#~ msgstr "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" #~ msgid "" #~ "Now that we have all our " @@ -28216,441 +32612,761 @@ msgstr "" #~ "simple distributed training with two " #~ "clients and one server. Our training " #~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." 
#~ msgstr "" -#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " -#~ "MXNet 的 `手写数字识别教程 " -#~ "`_\"。" +#~ "现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的《Deep Learning with PyTorch " +#~ "`_》。" #~ msgid "" #~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " +#~ "import Flower and PyTorch related " #~ "packages:" -#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "此外,我们还在 PyTorch 中定义了设备分配:" #~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" -#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" -#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" +#~ "我们使用 PyTorch 来加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " +#~ ":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" #~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." -#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." -#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" #~ msgid "" #~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." -#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." 
-#~ msgstr "" -#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " -#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " -#~ "客户端。" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" #~ msgid "" #~ "Flower provides a convenience class " #~ "called :code:`NumPyClient` which makes it " #~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " #~ "(:code:`set_parameters` is optional though):" #~ msgstr "" #~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" -#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " +#~ " PyTorch 时,它使 :code:`Client` 接口的实现变得更容易。实现 " #~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " #~ "是可选的):" -#~ msgid "They can be implemented in the following way:" -#~ msgstr "它们可以通过以下方式实现:" +#~ msgid "which can be implemented in the following way:" +#~ msgstr "可以通过以下方式实现:" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" -#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." +#~ msgstr "" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可以在 :code:`examples/quickstart-" +#~ "pytorch` 中找到。" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" -#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" -#~ " 并调用 :code:`fl.client.start_client()` 或 " -#~ ":code:`fl.client.start_numpy_client()`。字符串 " -#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" -#~ " " -#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -#~ " :code:`server_address`。" +#~ "代码:`self.bst`用于保存在各轮中保持一致的 Booster " +#~ "对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" + +#~ msgid "Implementing a Flower client" +#~ msgstr "实现 Flower 客户端" #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. 
We " -#~ "therefore have to start the server " -#~ "first:" -#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" +#~ msgstr "" +#~ "为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` " +#~ "的子类,并实现了 ``get_parameters``、``fit`` 和``evaluate`` " +#~ "三个方法:" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" -#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -#~ "`_ 可在 :code:`examples/quickstart-" -#~ "mxnet` 中找到。" +#~ "函数 ``start_simulation`` 接受许多参数,其中包括用于创建 " +#~ "``FlowerClient`` 实例的 " +#~ "``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" +#~ " (FedAvg)。" -#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "使用 OpenML 加载 MNIST 数据集" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "" -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "" -#~ msgid "Shuffles data and its label" -#~ msgstr "对数据及其标签进行洗牌" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "将数据集分割成多个分区" +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "" + +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "" + +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "" + +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "" + +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "" + +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "" + +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "" + +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "" + +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgstr "" + +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "" + +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" + +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. 
The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ "我们从 `OpenML `_ 中加载 " -#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " -#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " -#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#~ msgid "Let's get stated!" -#~ msgstr "让我们开始吧!" +#~ msgid "the string key as the query for the layout." +#~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "Corresponding layout based on the query." #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "run\\_server\\_app" +#~ msgstr "run\\_server\\_app" + +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "启动基于 Ray 的Flower模拟服务器。" + +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." #~ msgstr "" +#~ "创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个" +#~ " Client " +#~ "类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" +#~ " `client_fn` 或任何客户端方法(例如,在 `evaluate` " +#~ "方法中加载评估数据)时(重新)创建。" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "The total number of clients in this simulation." +#~ msgstr "需要等待的客户数量。" + +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" +#~ "列出每个客户的 `client_id`。只有在未设置 `num_clients` " +#~ "时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" +#~ "\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU " +#~ "资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解 " +#~ "`num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray" +#~ " 文档。" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." 
+#~ msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." #~ msgstr "" +#~ "抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server`" +#~ " 将使用 `flwr.server.strategy.FedAvg`。" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_simulation` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } 可以使用空字典(ray_init_args={})来防止向 ray.init " +#~ "传递任何参数。" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray:" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" -#~ msgstr "" +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" -#~ msgid "|7f0ee162da38450788493a21627306f7|" -#~ msgstr "" +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. 
For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" +#~ "(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 " +#~ "VCE " +#~ "选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " +#~ "NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " +#~ "文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" -#~ msgstr "" +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "**hist** -- 包含训练指标的对象。" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" -#~ msgstr "" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" -#~ msgstr "" +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" +#~ "请参阅 `完整代码示例 " +#~ "`_了解更多信息。" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" -#~ msgstr "" +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" -#~ msgstr "" +#~ msgid "Dependencies" +#~ msgstr "依赖关系" #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" #~ msgstr "" -#~ "目前,Flower " -#~ "提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" -#~ " 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装" -#~ " Flower 服务器。" +#~ "要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 " +#~ ":code:`flwr`、 :code:`torch`和 :code:`transformers`。这可以通过" +#~ " :code:`pip` 来完成:" -#~ msgid "``3.11``" -#~ msgstr "``1.0.0rc1``" +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "标准Hugging Face工作流程" -#~ msgid "Defaults to ``22.04``." -#~ msgstr "默认为 ``22.04``。" +#~ msgid "Handling the data" +#~ msgstr "处理数据" -#~ msgid "Building the SuperLink image" -#~ msgstr "启动服务器" +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" +#~ "为了获取 IMDB 数据集,我们将使用 Hugging Face 的 " +#~ ":code:`datasets` 库。然后,我们需要对数据进行标记化,并创建 :code:`PyTorch` " +#~ "数据加载器,这些都将在 :code:`load_data` 函数中完成:" -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "默认为 ``flwr/server``。" +#~ msgid "Training and testing the model" +#~ msgstr "训练和测试模型" -#~ msgid "The Python version of the base image." 
-#~ msgstr "基础镜像的存储库名称。" +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" +#~ "有了创建 trainloader 和 testloader " +#~ "的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` 训练或测试循环都非常相似:" -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "默认为 ``22.04``。" +#~ msgid "Creating the model itself" +#~ msgstr "创建模型本身" -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "默认为 ``py3.11-ubuntu22.04``。" +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" +#~ "要创建模型本身,我们只需使用 Hugging Face 的 " +#~ ":code:`AutoModelForSequenceClassification` 加载预训练的 " +#~ "distillBERT 模型:" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "默认为 ``flwr/server``。" +#~ msgid "Creating the IMDBClient" +#~ msgstr "创建 IMDBClient" #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." -#~ msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" +#~ "要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " +#~ ":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 " +#~ ":code:`PyTorch` 模型:" -#~ msgid "Creating New Messages" -#~ msgstr "创建新信息" +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" +#~ ":code:`get_parameters` " +#~ "函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" + +#~ msgid "Starting the server" +#~ msgstr "启动服务器" #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." -#~ msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " +#~ ":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " +#~ ":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." -#~ msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." 
+#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" +#~ "使用 :code:`weighted_average` " +#~ "函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" -#~ msgid "Server's side:" -#~ msgstr "在服务器端:" +#~ msgid "Putting everything together" +#~ msgstr "把所有东西放在一起" -#~ msgid "Client's side:" -#~ msgstr "在客户端:" +#~ msgid "We can now start client instances using:" +#~ msgstr "现在我们可以使用:" #~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" -#~ msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" - -#~ msgid "Message Types for Protocol Buffers" -#~ msgstr "协议缓冲区的信息类型" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "他们就能连接到服务器,开始联邦训练。" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" -#~ "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " -#~ "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档" -#~ " `_。" - -#~ msgid "Within the :code:`ServerMessage` block:" -#~ msgstr "在 :code:`ServerMessage` 代码块中:" - -#~ msgid "Within the ClientMessage block:" -#~ msgstr "在 ClientMessage 代码块中:" +#~ "如果您想查看所有内容,请查看完整的代码示例: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." -#~ msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" +#~ "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower " +#~ "联合Hugging Face的工作流程。" -#~ msgid "Once that is done, we will compile the file with:" -#~ msgstr "完成后,我们将使用:" +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" -#~ msgid "If it compiles successfully, you should see the following message:" -#~ msgstr "如果编译成功,你应该会看到以下信息:" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" -#~ msgid "Serialization and Deserialization Functions" -#~ msgstr "序列化和反序列化函数" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" #~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. 
You should add " -#~ "these functions in :code:`serde.py`." +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" -#~ "下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC " -#~ "消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在" -#~ " :code:`serde.py` 中添加这些函数。" +#~ "请参阅 `完整代码示例 " +#~ "`_ 了解更多信息。" -#~ msgid "The four functions:" -#~ msgstr "四种函数:" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" -#~ msgid "Sending the Message from the Server" -#~ msgstr "从服务器发送信息" +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "在导入 Flower 之前,我们必须先安装它:" #~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" -#~ msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" -#~ msgid "Receiving the Message by the Client" -#~ msgstr "由客户端接收信息" +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" #~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." #~ msgstr "" -#~ "最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用" -#~ " :code:`example_response` 函数。记住使用 serde 函数!" +#~ "我们使用 TF 的 Keras 实用程序加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` 会下载 " +#~ "CIFAR10,将其缓存到本地,然后以 NumPy ndarrays 的形式返回整个训练集和测试集。" -#~ msgid "Within the handle function:" -#~ msgstr "在句柄函数内:" +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" -#~ msgid "And add a new function:" -#~ msgstr "并增加一个新函数:" +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" +#~ "Flower 服务器通过一个名为 :code:`Client` " +#~ "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 " +#~ ":code:`Client` 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" -#~ msgid "Hopefully, when you run your program you will get the intended result!" -#~ msgstr "希望您在运行程序时能得到预期的结果!" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." 
+#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " Keras 时,该类可以更轻松地实现 :code:`Client` " +#~ "接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" -#~ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" -#~ msgid "Run Flower server (Driver API)." -#~ msgstr "flower-driver-api" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " +#~ ":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的" +#~ " :code:`server_address`。" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" -#~ msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "Each client will have its own dataset." +#~ msgstr "每个客户都有自己的数据集。" -#~ msgid "Run Flower server (Fleet API)." -#~ msgstr "Flower 服务器。" +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" -#~ msgid "Unreleased" -#~ msgstr "尚未发布" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." 
+#~ msgstr "" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " +#~ "`_ 可以在 :code:`examples/quickstart-" +#~ "tensorflow/client.py` 中找到。" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" -#~ msgstr "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" -#~ msgstr "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" -#~ msgstr "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" -#~ msgstr "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" -#~ msgstr "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" -#~ msgstr "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" -#~ msgstr "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" -#~ msgstr "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" -#~ msgstr "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" -#~ msgstr "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" -#~ msgstr "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" -#~ msgstr "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" -#~ msgstr "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" -#~ msgstr "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" diff --git a/doc/source/_static/flower-architecture-ECE.png b/doc/source/_static/flower-architecture-ECE.png deleted file mode 100755 index 8ccc83469c5d..000000000000 Binary files a/doc/source/_static/flower-architecture-ECE.png and /dev/null differ diff --git a/doc/source/_static/flower-architecture-VCE.png b/doc/source/_static/flower-architecture-VCE.png deleted file mode 100755 index a7ff1a2c2ace..000000000000 Binary files a/doc/source/_static/flower-architecture-VCE.png and /dev/null differ diff --git a/doc/source/_static/flower-architecture-basic-architecture.svg b/doc/source/_static/flower-architecture-basic-architecture.svg new file mode 100644 index 000000000000..65d0ccc05e96 --- /dev/null +++ b/doc/source/_static/flower-architecture-basic-architecture.svg @@ -0,0 +1,4 @@ + + + +
 Client
SuperNode
ClientApp
 Client
SuperNode
ClientApp
 Client
SuperNode
ClientApp
 Server
ServerApp
SuperLink
\ No newline at end of file
diff --git a/doc/source/_static/flower-architecture-deployment-engine.svg b/doc/source/_static/flower-architecture-deployment-engine.svg
new file mode 100644
index 000000000000..2e8dbdfd2626
--- /dev/null
+++ b/doc/source/_static/flower-architecture-deployment-engine.svg
@@ -0,0 +1,4 @@
+ + + +
 User
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperExec



SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
Deployment Engine Executor
flwr run
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file
diff --git a/doc/source/_static/flower-architecture-hub-and-spoke.svg b/doc/source/_static/flower-architecture-hub-and-spoke.svg
new file mode 100644
index 000000000000..c97f74f2413d
--- /dev/null
+++ b/doc/source/_static/flower-architecture-hub-and-spoke.svg
@@ -0,0 +1,4 @@
+ + + +
 

Client
 

Server
 

Client
 

Client
 

Client
\ No newline at end of file
diff --git a/doc/source/_static/flower-architecture-multi-run-1.svg b/doc/source/_static/flower-architecture-multi-run-1.svg
new file mode 100644
index 000000000000..4e75224f5b59
--- /dev/null
+++ b/doc/source/_static/flower-architecture-multi-run-1.svg
@@ -0,0 +1,4 @@
+ + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file
diff --git a/doc/source/_static/flower-architecture-multi-run-2.svg b/doc/source/_static/flower-architecture-multi-run-2.svg
new file mode 100644
index 000000000000..b6d20453e98f
--- /dev/null
+++ b/doc/source/_static/flower-architecture-multi-run-2.svg
@@ -0,0 +1,4 @@
+ + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
\ No newline at end of file
diff --git a/doc/source/_static/flower-architecture-multi-run.svg b/doc/source/_static/flower-architecture-multi-run.svg
new file mode 100644
index 000000000000..91df0c514b52
--- /dev/null
+++ b/doc/source/_static/flower-architecture-multi-run.svg
@@ -0,0 +1,4 @@
+ + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture.drawio.png b/doc/source/_static/flower-architecture.drawio.png deleted file mode 100755 index a9c3914a1839..000000000000 Binary files a/doc/source/_static/flower-architecture.drawio.png and /dev/null differ diff --git a/doc/source/_templates/base.html b/doc/source/_templates/base.html index 768c560f4f6a..925e40765b13 100644 --- a/doc/source/_templates/base.html +++ b/doc/source/_templates/base.html @@ -46,9 +46,7 @@ {#- Site title -#} {%- block htmltitle -%} - {% if versions %} - Flower Framework {{ current_version.url }} - {% elif not docstitle %} + {% if not docstitle %} {{ title|striptags|e }} {% elif pagename == master_doc %} {{ docstitle|striptags|e }} diff --git a/doc/source/conf.py b/doc/source/conf.py index 5d434dd729bb..033b345b60cc 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -90,10 +90,10 @@ author = "The Flower Authors" # The full version of the next release, including alpha/beta/rc tags -release = "1.11.0" +release = "1.12.0" # The current released version rst_prolog = """ -.. |stable_flwr_version| replace:: 1.10.0 +.. |stable_flwr_version| replace:: 1.11.1 .. |stable_flwr_superlink_docker_digest| replace:: 4b317d5b6030710b476f4dbfab2c3a33021ad40a0fcfa54d7edd45e0c51d889c .. |ubuntu_version| replace:: 22.04 .. |setuptools_version| replace:: 70.3.0 @@ -122,6 +122,7 @@ "nbsphinx", "sphinx_click", "sphinx_substitution_extensions", + "sphinxext.opengraph", ] # Generate .rst files @@ -195,7 +196,6 @@ def find_test_modules(package_path): "apiref-binaries": "ref-api-cli.html", "fedbn-example-pytorch-from-centralized-to-federated": "example-fedbn-pytorch-from-centralized-to-federated.html", "how-to-use-built-in-middleware-layers": "how-to-use-built-in-mods.html", - "vit-finetune": "flowertune-vit.html", # Restructuring: tutorials "tutorial/Flower-0-What-is-FL": "tutorial-series-what-is-federated-learning.html", "tutorial/Flower-1-Intro-to-FL-PyTorch": "tutorial-series-get-started-with-flower-pytorch.html", @@ -250,8 +250,6 @@ def find_test_modules(package_path): "creating-new-messages": "contributor-how-to-create-new-messages.html", "write-documentation": "contributor-how-to-write-documentation.html", "release-process": "contributor-how-to-release-flower.html", - # Restructuring: contributor explanations - "architecture": "contributor-explanation-architecture.html", # Restructuring: contributor references "good-first-contributions": "contributor-ref-good-first-contributions.html", "secagg": "contributor-ref-secure-aggregation-protocols.html", @@ -265,6 +263,9 @@ def find_test_modules(package_path): "example-mxnet-walk-through": "index.html", "ref-api/flwr.simulation.run_simulation_from_cli": "index.html", "contributor-how-to-create-new-messages": "index.html", + "example-jax-from-centralized-to-federated": "tutorial-quickstart-jax.html", + "architecture": "explanation-flower-architecture.html", + "contributor-explanation-architecture.html": "explanation-flower-architecture.html", } # -- Options for HTML output ------------------------------------------------- @@ -273,7 +274,7 @@ def find_test_modules(package_path): # a list of builtin themes. 
# html_theme = "furo" -html_title = f"Flower Framework" +html_title = "Flower Framework" html_logo = "_static/flower-logo.png" html_favicon = "_static/favicon.ico" html_baseurl = "https://flower.ai/docs/framework/" diff --git a/doc/source/contributor-explanation-architecture.rst b/doc/source/contributor-explanation-architecture.rst deleted file mode 100644 index a20a84313118..000000000000 --- a/doc/source/contributor-explanation-architecture.rst +++ /dev/null @@ -1,26 +0,0 @@ -Flower Architecture -=================== - -Edge Client Engine ------------------- - -`Flower `_ core framework architecture with Edge Client Engine - -.. figure:: _static/flower-architecture-ECE.png - :width: 80 % - -Virtual Client Engine ---------------------- - -`Flower `_ core framework architecture with Virtual Client Engine - -.. figure:: _static/flower-architecture-VCE.png - :width: 80 % - -Virtual Client Engine and Edge Client Engine in the same workload ------------------------------------------------------------------ - -`Flower `_ core framework architecture with both Virtual Client Engine and Edge Client Engine - -.. figure:: _static/flower-architecture.drawio.png - :width: 80 % diff --git a/doc/source/contributor-how-to-build-docker-images.rst b/doc/source/contributor-how-to-build-docker-images.rst index 522d124dfd9b..d6acad4afa03 100644 --- a/doc/source/contributor-how-to-build-docker-images.rst +++ b/doc/source/contributor-how-to-build-docker-images.rst @@ -26,7 +26,7 @@ Before we can start, we need to meet a few prerequisites in our local developmen default values, others must be specified when building the image. All available build arguments for each image are listed in one of the tables below. -Building the base image +Building the Base Image ----------------------- .. list-table:: @@ -65,6 +65,10 @@ Building the base image - The Flower package to be installed. - No - ``flwr`` or ``flwr-nightly`` + * - ``FLWR_VERSION_REF`` + - A `direct reference `_ without the ``@`` specifier. If both ``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the ``FLWR_VERSION_REF`` has precedence. + - No + - `Direct Reference Examples`_ The following example creates a base Ubuntu/Alpine image with Python ``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools :substitution-code:`|setuptools_version|` @@ -84,8 +88,8 @@ and Flower :substitution-code:`|stable_flwr_version|`: In this example, we specify our image name as ``flwr_base`` and the tag as ``0.1.0``. Remember that the build arguments as well as the name and tag can be adapted to your needs. These values serve as examples only. -Building the SuperLink/SuperNode or ServerApp image ---------------------------------------------------- +Building a Flower Binary Image +------------------------------ .. list-table:: :widths: 25 45 15 15 @@ -130,3 +134,21 @@ After creating the image, we can test whether the image is working: .. code-block:: bash $ docker run --rm flwr_superlink:0.1.0 --help + +Direct Reference Examples +------------------------- + +.. 
code-block:: bash + :substitutions: + + # main branch + git+https://github.com/adap/flower.git@main + + # commit hash + git+https://github.com/adap/flower.git@1187c707f1894924bfa693d99611cf6f93431835 + + # tag + git+https://github.com/adap/flower.git@|stable_flwr_version| + + # artifact store + https://artifact.flower.ai/py/main/latest/flwr-|stable_flwr_version|-py3-none-any.whl diff --git a/doc/source/contributor-how-to-set-up-a-virtual-env.rst b/doc/source/contributor-how-to-set-up-a-virtual-env.rst index 8b684e24c658..a844298fdca9 100644 --- a/doc/source/contributor-how-to-set-up-a-virtual-env.rst +++ b/doc/source/contributor-how-to-set-up-a-virtual-env.rst @@ -8,7 +8,7 @@ You can follow the instructions or choose your preferred setup. Python Version -------------- -Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. +Flower requires at least `Python 3.9 `_, but `Python 3.10 `_ or above is recommended. .. note:: Due to a known incompatibility with `ray `_, diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst index d7d647996a3d..3dac8647fa33 100644 --- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst +++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst @@ -4,7 +4,7 @@ Get started as a contributor Prerequisites ------------- -- `Python 3.8 `_ or above +- `Python 3.9 `_ or above - `Poetry 1.3 `_ or above - (Optional) `pyenv `_ - (Optional) `pyenv-virtualenv `_ @@ -51,11 +51,11 @@ GitHub:: 2. Let's create the Python environment for all-things Flower. If you wish to use :code:`pyenv`, we provide two convenience scripts that you can use. If you prefer using something else than :code:`pyenv`, create a new environment, activate and skip to the last point where all packages are installed. -* If you don't have :code:`pyenv` installed, the following script that will install it, set it up, and create the virtual environment (with :code:`Python 3.8.17` by default):: +* If you don't have :code:`pyenv` installed, the following script that will install it, set it up, and create the virtual environment (with :code:`Python 3.9.20` by default):: $ ./dev/setup-defaults.sh # once completed, run the bootstrap script -* If you already have :code:`pyenv` installed (along with the :code:`pyenv-virtualenv` plugin), you can use the following convenience script (with :code:`Python 3.8.17` by default):: +* If you already have :code:`pyenv` installed (along with the :code:`pyenv-virtualenv` plugin), you can use the following convenience script (with :code:`Python 3.9.20` by default):: $ ./dev/venv-create.sh # once completed, run the `bootstrap.sh` script @@ -78,8 +78,8 @@ Create/Delete Virtual Environment :: - $ ./dev/venv-create.sh # Default is 3.8.17 - $ ./dev/venv-delete.sh # Default is 3.8.17 + $ ./dev/venv-create.sh # Default is 3.9.20 + $ ./dev/venv-delete.sh # Default is 3.9.20 Compile ProtoBuf Definitions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/docker/index.rst b/doc/source/docker/index.rst index a070a47cb853..968f01581b34 100644 --- a/doc/source/docker/index.rst +++ b/doc/source/docker/index.rst @@ -33,13 +33,15 @@ Advanced Options set-environment-variables run-as-root-user + run-as-subprocess pin-version use-a-different-version -Run Flower Docker Compose -------------------------- +Run Flower using Docker Compose +------------------------------- .. 
toctree:: :maxdepth: 1 tutorial-quickstart-docker-compose + run-quickstart-examples-docker-compose diff --git a/doc/source/docker/run-as-subprocess.rst b/doc/source/docker/run-as-subprocess.rst new file mode 100644 index 000000000000..f8c482f632a0 --- /dev/null +++ b/doc/source/docker/run-as-subprocess.rst @@ -0,0 +1,53 @@ +Run ClientApp as a Subprocess +============================= + +In this mode, the ClientApp is executed as a subprocess within the SuperNode Docker container, +rather than running in a separate container. This approach reduces the number of running containers, +which can be beneficial for environments with limited resources. However, it also means that the +ClientApp is no longer isolated from the SuperNode, which may introduce additional security +concerns. + +Prerequisites +------------- + +#. Before running the ClientApp as a subprocess, ensure that the FAB dependencies have been installed + in the SuperNode images. This can be done by extending the SuperNode image: + + .. code-block:: dockerfile + :caption: Dockerfile.supernode + :linenos: + :substitutions: + + FROM flwr/supernode:|stable_flwr_version| + + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flower-supernode"] + +#. Next, build the SuperNode Docker image by running the following command in the directory where + Dockerfile is located: + + .. code-block:: shell + + $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + + +Run the ClientApp as a Subprocess +--------------------------------- + +Start the SuperNode with the flag ``--isolation subprocess``, which tells the SuperNode to execute +the ClientApp as a subprocess: + +.. code-block:: shell + + $ docker run --rm \ + --detach \ + flwr_supernode:0.0.1 \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address localhost:9094 \ + --isolation subprocess diff --git a/doc/source/docker/run-quickstart-examples-docker-compose.rst b/doc/source/docker/run-quickstart-examples-docker-compose.rst new file mode 100644 index 000000000000..5bdb33e991dd --- /dev/null +++ b/doc/source/docker/run-quickstart-examples-docker-compose.rst @@ -0,0 +1,122 @@ +Run Flower Quickstart Examples with Docker Compose +================================================== + +Flower provides a set of `quickstart examples `_ +to help you get started with the framework. These examples are designed to demonstrate the +capabilities of Flower and by default run using the Simulation Engine. This guide demonstrates +how to run them using Flower's Deployment Engine via Docker Compose. + +.. important:: + + Some quickstart examples may have limitations or requirements that prevent them from running + on every environment. For more information, please see `Limitations`_. + +Prerequisites +------------- + +Before you start, make sure that: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running. +- Docker Compose is `installed `_. + +Run the Quickstart Example +-------------------------- + +#. Clone the quickstart example you like to run. For example, ``quickstart-pytorch``: + + .. code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/quickstart-pytorch . \ + && rm -rf flower && cd quickstart-pytorch + +#. Download the `compose.yml `_ file into the example directory: + + .. 
code-block:: bash + + $ curl https://raw.githubusercontent.com/adap/flower/refs/heads/main/src/docker/complete/compose.yml \ + -o compose.yml + +#. Build and start the services using the following command: + + .. code-block:: bash + + $ docker compose up --build -d + +#. Append the following lines to the end of the ``pyproject.toml`` file and save it: + + .. code-block:: toml + :caption: pyproject.toml + + [tool.flwr.federations.local-deployment] + address = "127.0.0.1:9093" + insecure = true + + .. note:: + + You can customize the string that follows ``tool.flwr.federations.`` to fit your needs. + However, please note that the string cannot contain a dot (``.``). + + In this example, ``local-deployment`` has been used. Just remember to replace + ``local-deployment`` with your chosen name in both the ``tool.flwr.federations.`` string + and the corresponding ``flwr run .`` command. + +#. Run the example: + + .. code-block:: bash + + $ flwr run . local-deployment + +#. Follow the logs of the SuperExec service: + + .. code-block:: bash + + $ docker compose logs superexec -f + +That is all it takes! You can monitor the progress of the run through the logs of the SuperExec. + +Run a Different Quickstart Example +---------------------------------- + +To run a different quickstart example, such as ``quickstart-tensorflow``, first, shut down the Docker +Compose services of the current example: + +.. code-block:: bash + + $ docker compose down + +After that, you can repeat the steps above. + +Limitations +----------- + +.. list-table:: + :header-rows: 1 + + * - Quickstart Example + - Limitations + * - quickstart-fastai + - None + * - quickstart-huggingface + - None + * - quickstart-jax + - The example has not yet been updated to work with the latest ``flwr`` version. + * - quickstart-mlcube + - The example has not yet been updated to work with the latest ``flwr`` version. + * - quickstart-mlx + - `Requires to run on macOS with Apple Silicon `_. + * - quickstart-monai + - None + * - quickstart-pandas + - None + * - quickstart-pytorch-lightning + - Requires an older pip version that is not supported by the Flower Docker images. + * - quickstart-pytorch + - None + * - quickstart-sklearn-tabular + - None + * - quickstart-tabnet + - The example has not yet been updated to work with the latest ``flwr`` version. + * - quickstart-tensorflow + - Only runs on AMD64. diff --git a/doc/source/docker/tutorial-quickstart-docker-compose.rst b/doc/source/docker/tutorial-quickstart-docker-compose.rst index 93a000295951..7aeae1e2fb6b 100644 --- a/doc/source/docker/tutorial-quickstart-docker-compose.rst +++ b/doc/source/docker/tutorial-quickstart-docker-compose.rst @@ -44,7 +44,7 @@ Step 1: Set Up Setting the ``PROJECT_DIR`` helps Docker Compose locate the ``pyproject.toml`` file, allowing it to install dependencies in the SuperExec and SuperNode images correctly. -Step 2: Run Flower in insecure mode +Step 2: Run Flower in Insecure Mode ----------------------------------- To begin, start Flower with the most basic configuration. In this setup, Flower @@ -230,7 +230,7 @@ Step 6: Run Flower with TLS [tool.flwr.federations.docker-compose-tls] address = "127.0.0.1:9093" - root-certificates = "superexec-certificates/ca.crt" + root-certificates = "../superexec-certificates/ca.crt" #. 
Restart the services with TLS enabled: @@ -248,43 +248,64 @@ Step 6: Run Flower with TLS Step 7: Add another SuperNode ----------------------------- -You can add more SuperNodes by duplicating the SuperNode definition in the ``compose.yml`` file. +You can add more SuperNodes and ClientApps by duplicating their definitions in the ``compose.yml`` +file. -Just make sure to give each new SuperNode service a unique service name like ``supernode-3``, ``supernode-4``, etc. +Just give each new SuperNode and ClientApp service a unique service name like ``supernode-3``, +``clientapp-3``, etc. In ``compose.yml``, add the following: .. code-block:: yaml :caption: compose.yml + :substitutions: - services: # other service definitions supernode-3: - user: root - deploy: - resources: - limits: - cpus: "2" + image: flwr/supernode:${FLWR_VERSION:-|stable_flwr_version|} command: + - --insecure - --superlink - superlink:9092 - - --insecure + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" depends_on: - superlink - volumes: - - apps-volume:/app/.flwr/apps/:ro + + clientapp-3: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/supernode:${FLWR_VERSION:-1.10.0} + FROM flwr/clientapp:${FLWR_VERSION:-|stable_flwr_version|} + + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app WORKDIR /app COPY --chown=app:app pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=0,num-partitions=2"] + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-3:9096 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-3 If you also want to enable TLS for the new SuperNodes, duplicate the SuperNode definition for each new SuperNode service in the ``with-tls.yml`` file. @@ -296,13 +317,18 @@ In ``with-tls.yml``, add the following: .. code-block:: yaml :caption: with-tls.yml - services: # other service definitions supernode-3: command: - --superlink - superlink:9092 + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" - --root-certificates - certificates/ca.crt secrets: @@ -315,14 +341,13 @@ Step 8: Persisting the SuperLink State and Enabling TLS To run Flower with persisted SuperLink state and enabled TLS, a slight change in the ``with-state.yml`` file is required: -#. Comment out the lines 3-5 and uncomment the lines 6-10: +#. Comment out the lines 2-4 and uncomment the lines 5-9: .. code-block:: yaml :caption: with-state.yml :linenos: - :emphasize-lines: 3-10 + :emphasize-lines: 2-9 - services: superlink: # command: # - --insecure @@ -371,3 +396,8 @@ Remove all services and volumes: $ docker compose down -v $ docker compose -f certs.yml down -v + +Where to Go Next +---------------- + +* :doc:`run-quickstart-examples-docker-compose` diff --git a/doc/source/docker/tutorial-quickstart-docker.rst b/doc/source/docker/tutorial-quickstart-docker.rst index 29ae6d5f6a43..189d019cb097 100644 --- a/doc/source/docker/tutorial-quickstart-docker.rst +++ b/doc/source/docker/tutorial-quickstart-docker.rst @@ -66,8 +66,8 @@ Open your terminal and run: * ``docker run``: This tells Docker to run a container from an image. * ``--rm``: Remove the container once it is stopped or the command exits. 
* | ``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the container to the same port of - | the host machine, allowing you to access the Driver API on ``http://localhost:9091`` and - | the Fleet API on ``http://localhost:9092``. + | the host machine, allowing other services to access the Driver API on + | ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. * ``--network flwr-network``: Make the container join the network named ``flwr-network``. * ``--name superlink``: Assign the name ``superlink`` to the container. * ``--detach``: Run the container in the background, freeing up the terminal. @@ -79,32 +79,92 @@ Open your terminal and run: Step 3: Start the SuperNode --------------------------- -The SuperNode Docker image comes with a pre-installed version of Flower and serves as a base for -building your own SuperNode image. +Start two SuperNode containers. -#. Create a SuperNode Dockerfile called ``Dockerfile.supernode`` and paste the following code into it: +#. Start the first container: + + .. code-block:: bash + :substitutions: + + $ docker run --rm \ + -p 9094:9094 \ + --network flwr-network \ + --name supernode-1 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=0 num-partitions=2" \ + --supernode-address 0.0.0.0:9094 \ + --isolation process + + .. dropdown:: Understand the command + + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9094:9094``: Map port ``9094`` of the container to the same port of + | the host machine, allowing other services to access the SuperNode API on + | ``http://localhost:9094``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name supernode-1``: Assign the name ``supernode-1`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr/supernode:|stable_flwr_version|``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + * | ``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at the address + | ``superlink:9092``. + * | ``--node-config "partition-id=0 num-partitions=2"``: Set the partition ID to ``0`` and the + | number of partitions to ``2`` for the SuperNode configuration. + * | ``--supernode-address 0.0.0.0:9094``: Set the address and port number that the SuperNode + | is listening on. + * | ``--isolation process``: Tells the SuperNode that the ClientApp is created by separate + | independent process. The SuperNode does not attempt to create it. + +#. Start the second container: + + .. code-block:: shell + :substitutions: + + $ docker run --rm \ + -p 9095:9095 \ + --network flwr-network \ + --name supernode-2 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address 0.0.0.0:9095 \ + --isolation process + +Step 4: Start the ClientApp +--------------------------- + +The ClientApp Docker image comes with a pre-installed version of Flower and serves as a base for +building your own ClientApp image. In order to install the FAB dependencies, you will need to create +a Dockerfile that extends the ClientApp image and installs the required dependencies. 
+ +#. Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste the following code into it: .. code-block:: dockerfile - :caption: Dockerfile.supernode + :caption: Dockerfile.clientapp :linenos: :substitutions: - FROM flwr/supernode:|stable_flwr_version| + FROM flwr/clientapp:|stable_flwr_version| WORKDIR /app COPY pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . - COPY flower.quickstart-docker.1-0-0.fab . - RUN flwr install flower.quickstart-docker.1-0-0.fab - - ENTRYPOINT ["flower-supernode"] + ENTRYPOINT ["flwr-clientapp"] .. dropdown:: Understand the Dockerfile - * | :substitution-code:`FROM flwr/supernode:|stable_flwr_version|`: This line specifies that the Docker image - | to be built from is the ``flwr/supernode image``, version :substitution-code:`|stable_flwr_version|`. + * | :substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/clientapp image``, version :substitution-code:`|stable_flwr_version|`. * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. | Any subsequent commands that reference a directory will be relative to this directory. * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file @@ -116,51 +176,37 @@ building your own SuperNode image. | | The ``-U`` flag indicates that any existing packages should be upgraded, and | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. - * | ``COPY flower.quickstart-docker.1-0-0.fab .``: Copy the - | ``flower.quickstart-docker.1-0-0.fab`` file from the current working directory into - | the container's ``/app`` directory. - * | ``RUN flwr install flower.quickstart-docker.1-0-0.fab``: Run the ``flwr`` install command - | to install the Flower App Bundle locally. - * | ``ENTRYPOINT ["flower-supernode"]``: Set the command ``flower-supernode`` to be + * | ``ENTRYPOINT ["flwr-clientapp"]``: Set the command ``flwr-clientapp`` to be | the default command run when the container is started. .. important:: - Note that `flwr `__ is already installed in the ``flwr/supernode`` + Note that `flwr `__ is already installed in the ``flwr/clientapp`` base image, so only other package dependencies such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a result, the ``flwr`` dependency is removed from the ``pyproject.toml`` after it has been copied into the Docker image (see line 5). -#. Build the Flower App Bundle (FAB): - - .. code-block:: bash - - $ flwr build - -#. Next, build the SuperNode Docker image by running the following command in the directory where - Dockerfile is located: +#. Next, build the ClientApp Docker image by running the following command in the directory where + the Dockerfile is located: .. code-block:: bash - $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . .. note:: - The image name was set as ``flwr_supernode`` with the tag ``0.0.1``. Remember that + The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. Remember that these values are merely examples, and you can customize them according to your requirements. -#. Start the first SuperNode container: +#. Start the first ClientApp container: .. 
code-block:: bash $ docker run --rm \ --network flwr-network \ --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=0,num-partitions=2 + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 .. dropdown:: Understand the command @@ -168,35 +214,28 @@ building your own SuperNode image. * ``--rm``: Remove the container once it is stopped or the command exits. * ``--network flwr-network``: Make the container join the network named ``flwr-network``. * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr_supernode:0.0.1``: This is the name of the image to be run and the specific tag + * | ``flwr_clientapp:0.0.1``: This is the name of the image to be run and the specific tag | of the image. - * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. - * | ``--superlink superlink:9092``: Connect to the SuperLinks Fleet API on the address - | ``superlink:9092``. - * | ``--node-config partition-id=0,num-partitions=2``: Set the partition ID to ``0`` and the - | number of partitions to ``2`` for the SuperNode configuration. + * | ``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at the address + | ``supernode-1:9094``. -#. Start the second SuperNode container: +#. Start the second ClientApp container: .. code-block:: shell $ docker run --rm \ --network flwr-network \ --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=1,num-partitions=2 + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 -Step 4: Start the SuperExec +Step 5: Start the SuperExec --------------------------- -The procedure for building and running a SuperExec image is almost identical to the SuperNode image. +The procedure for building and running a SuperExec image is almost identical to the ClientApp image. -Similar to the SuperNode image, the SuperExec Docker image comes with a pre-installed version of -Flower and serves as a base for building your own SuperExec image. +Similar to the ClientApp image, you will need to create a Dockerfile that extends the SuperExec +image and installs the required FAB dependencies. #. Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste the following code in: @@ -254,8 +293,7 @@ Flower and serves as a base for building your own SuperExec image. --detach \ flwr_superexec:0.0.1 \ --insecure \ - --executor-config \ - superlink=\"superlink:9091\" + --executor-config superlink=\"superlink:9091\" .. dropdown:: Understand the command @@ -273,7 +311,7 @@ Flower and serves as a base for building your own SuperExec image. * | ``--executor-config superlink=\"superlink:9091\"``: Configure the SuperExec executor to | connect to the SuperLink running on port ``9091``. -Step 5: Run the Quickstart Project +Step 6: Run the Quickstart Project ---------------------------------- #. Add the following lines to the ``pyproject.toml``: @@ -297,7 +335,7 @@ Step 5: Run the Quickstart Project $ docker logs -f superexec -Step 6: Update the Application +Step 7: Update the Application ------------------------------ #. Change the application code. For example, change the ``seed`` in ``quickstart_docker/task.py`` @@ -310,39 +348,32 @@ Step 6: Update the Application partition_train_test = partition.train_test_split(test_size=0.2, seed=43) # ... -#. Stop the current SuperNode containers: +#. Stop the current ClientApp containers: .. 
code-block:: bash - $ docker stop $(docker ps -a -q --filter ancestor=flwr_supernode:0.0.1) + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) -#. Rebuild the FAB and SuperNode image: +#. Rebuild the FAB and ClientApp image: .. code-block:: bash - $ flwr build - $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . -#. Launch two new SuperNode containers based on the newly built image: +#. Launch two new ClientApp containers based on the newly built image: .. code-block:: bash $ docker run --rm \ --network flwr-network \ --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=0,num-partitions=2 + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 $ docker run --rm \ --network flwr-network \ --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=1,num-partitions=2 + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 #. Run the updated project: @@ -350,14 +381,16 @@ Step 6: Update the Application $ flwr run . docker -Step 7: Clean Up +Step 8: Clean Up ---------------- Remove the containers and the bridge network: .. code-block:: bash - $ docker stop $(docker ps -a -q --filter ancestor=flwr_supernode:0.0.1) \ + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) \ + supernode-1 \ + supernode-2 \ superexec \ superlink $ docker network rm flwr-network diff --git a/doc/source/example-jax-from-centralized-to-federated.rst b/doc/source/example-jax-from-centralized-to-federated.rst deleted file mode 100644 index 6b06a288a67a..000000000000 --- a/doc/source/example-jax-from-centralized-to-federated.rst +++ /dev/null @@ -1,282 +0,0 @@ -Example: JAX - Run JAX Federated -================================ - -This tutorial will show you how to use Flower to build a federated version of an existing JAX workload. -We are using JAX to train a linear regression model on a scikit-learn dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. -First, we build a centralized training approach based on the `Linear Regression with JAX `_ tutorial`. -Then, we build upon the centralized training code to run the training in a federated fashion. - -Before we start building our JAX example, we need install the packages :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`: - -.. code-block:: shell - - $ pip install jax jaxlib scikit-learn flwr - - -Linear Regression with JAX --------------------------- - -We begin with a brief description of the centralized training code based on a :code:`Linear Regression` model. -If you want a more in-depth explanation of what's going on then have a look at the official `JAX documentation `_. - -Let's create a new file called :code:`jax_training.py` with all the components required for a traditional (centralized) linear regression training. -First, the JAX packages :code:`jax` and :code:`jaxlib` need to be imported. In addition, we need to import :code:`sklearn` since we use :code:`make_regression` for the dataset and :code:`train_test_split` to split the dataset into a training and test set. -You can see that we do not yet import the :code:`flwr` package for federated learning. This will be done later. - -.. 
code-block:: python - - from typing import Dict, List, Tuple, Callable - import jax - import jax.numpy as jnp - from sklearn.datasets import make_regression - from sklearn.model_selection import train_test_split - - key = jax.random.PRNGKey(0) - -The :code:`load_data()` function loads the mentioned training and test sets. - -.. code-block:: python - - def load_data() -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]: - # create our dataset and start with similar datasets for different clients - X, y = make_regression(n_features=3, random_state=0) - X, X_test, y, y_test = train_test_split(X, y) - return X, y, X_test, y_test - -The model architecture (a very simple :code:`Linear Regression` model) is defined in :code:`load_model()`. - -.. code-block:: python - - def load_model(model_shape) -> Dict: - # model weights - params = { - 'b' : jax.random.uniform(key), - 'w' : jax.random.uniform(key, model_shape) - } - return params - -We now need to define the training (function :code:`train()`), which loops over the training set and measures the loss (function :code:`loss_fn()`) for each batch of training examples. The loss function is separate since JAX takes derivatives with a :code:`grad()` function (defined in the :code:`main()` function and called in :code:`train()`). - -.. code-block:: python - - def loss_fn(params, X, y) -> Callable: - err = jnp.dot(X, params['w']) + params['b'] - y - return jnp.mean(jnp.square(err)) # mse - - def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: - num_examples = X.shape[0] - for epochs in range(10): - grads = grad_fn(params, X, y) - params = jax.tree_multimap(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params,X, y) - # if epochs % 10 == 9: - # print(f'For Epoch {epochs} loss {loss}') - return params, loss, num_examples - -The evaluation of the model is defined in the function :code:`evaluation()`. The function takes all test examples and measures the loss of the linear regression model. - -.. code-block:: python - - def evaluation(params, grad_fn, X_test, y_test) -> Tuple[float, int]: - num_examples = X_test.shape[0] - err_test = loss_fn(params, X_test, y_test) - loss_test = jnp.mean(jnp.square(err_test)) - # print(f'Test loss {loss_test}') - return loss_test, num_examples - -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model using JAX. As already mentioned, the :code:`jax.grad()` function is defined in :code:`main()` and passed to :code:`train()`. - -.. code-block:: python - - def main(): - X, y, X_test, y_test = load_data() - model_shape = X.shape[1:] - grad_fn = jax.grad(loss_fn) - print("Model Shape", model_shape) - params = load_model(model_shape) - params, loss, num_examples = train(params, grad_fn, X, y) - evaluation(params, grad_fn, X_test, y_test) - - - if __name__ == "__main__": - main() - -You can now run your (centralized) JAX linear regression workload: - -.. code-block:: python - - python3 jax_training.py - -So far this should all look fairly familiar if you've used JAX before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. - -JAX meets Flower ----------------- - -The concept of federating an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`jax_training.py` for the *clients* that are connected to the *server*. 
-The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server*, which averages all received parameter updates. -This describes one round of the federated learning process, and we repeat this for multiple rounds. - -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. - -.. code-block:: python - - import flwr as fl - - if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) - -We can already start the *server*: - -.. code-block:: python - - python3 server.py - -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined JAX training in :code:`jax_training.py`. -Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxlib` to update the parameters on our JAX model: - -.. code-block:: python - - from typing import Dict, List, Callable, Tuple - - import flwr as fl - import numpy as np - import jax - import jax.numpy as jnp - - import jax_training - - -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`FlowerClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like JAX) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`FlowerClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. :code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform parameters to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model parameters and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss to the server - -The challenging part is to transform the JAX model parameters from :code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with `NumPyClient`. - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`evaluate()` previously defined in :code:`jax_training.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. - -.. 
code-block:: python - - - class FlowerClient(fl.client.NumPyClient): - """Flower client implementing using linear regression and JAX.""" - - def __init__( - self, - params: Dict, - grad_fn: Callable, - train_x: List[np.ndarray], - train_y: List[np.ndarray], - test_x: List[np.ndarray], - test_y: List[np.ndarray], - ) -> None: - self.params= params - self.grad_fn = grad_fn - self.train_x = train_x - self.train_y = train_y - self.test_x = test_x - self.test_y = test_y - - def get_parameters(self, config) -> Dict: - # Return model parameters as a list of NumPy ndarrays - parameter_value = [] - for _, val in self.params.items(): - parameter_value.append(np.array(val)) - return parameter_value - - def set_parameters(self, parameters: List[np.ndarray]) -> Dict: - # Collect model parameters and update the parameters of the local model - value=jnp.ndarray - params_item = list(zip(self.params.keys(),parameters)) - for item in params_item: - key = item[0] - value = item[1] - self.params[key] = value - return self.params - - - def fit( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[List[np.ndarray], int, Dict]: - # Set model parameters, train model, return updated model parameters - print("Start local training") - self.params = self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train(self.params, self.grad_fn, self.train_x, self.train_y) - results = {"loss": float(loss)} - print("Training results", results) - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[float, int, Dict]: - # Set model parameters, evaluate the model on a local test dataset, return result - print("Start evaluation") - self.params = self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation(self.params,self.grad_fn, self.test_x, self.test_y) - print("Evaluation accuracy & loss", loss) - return ( - float(loss), - num_examples, - {"loss": float(loss)}, - ) - -Having defined the federation process, we can run it. - -.. code-block:: python - - def main() -> None: - """Load data, start MNISTClient.""" - - # Load data - train_x, train_y, test_x, test_y = jax_training.load_data() - grad_fn = jax.grad(jax_training.loss_fn) - - # Load model (from centralized training) and initialize parameters - model_shape = train_x.shape[1:] - params = jax_training.load_model(model_shape) - - # Start Flower client - client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) - fl.client.start_client(server_address="0.0.0.0:8080", client.to_client()) - - if __name__ == "__main__": - main() - - -And that's it. You can now open two additional terminal windows and run - -.. code-block:: python - - python3 client.py - -in each window (make sure that the server is still running before you do so) and see your JAX project run federated learning across two clients. Congratulations! - -Next Steps ----------- - -The source code of this example was improved over time and can be found here: `Quickstart JAX `_. -Our example is somewhat over-simplified because both clients load the same dataset. - -You're now prepared to explore this topic further. How about using a more sophisticated model or using a different dataset? How about adding more clients? 
diff --git a/doc/source/explanation-flower-architecture.rst b/doc/source/explanation-flower-architecture.rst new file mode 100644 index 000000000000..22691d6091ac --- /dev/null +++ b/doc/source/explanation-flower-architecture.rst @@ -0,0 +1,180 @@ +##################### + Flower Architecture +##################### + +This page explains the architecture of a deployed Flower federated +learning system. + +In federated learning (FL), there is typically one server and a number +of clients that are connected to the server. This is often called a +federation. + +The role of the server is to coordinate the training process. The role +of each client is to receive tasks from the server, execute those tasks +and return the results back to the server. + +This is sometimes called a hub-and-spoke topology: + +.. figure:: ./_static/flower-architecture-hub-and-spoke.svg + :align: center + :width: 600 + :alt: Hub-and-spoke topology in federated learning + :class: no-scaled-link + + Hub-and-spoke topology in federated learning (one server, multiple clients). + +In a real-world deployment, we typically want to run different projects +on such a federation. Each project could use different hyperparameters, +different model architectures, different aggregation strategies, or even +different machine learning frameworks like PyTorch and TensorFlow. + +This is why, in Flower, both the server side and the client side are +split into two parts. One part is long-lived and responsible for +communicating across the network, while the other part is short-lived and +executes task-specific code. + +A Flower `server` consists of **SuperLink** and ``ServerApp``: + +- **SuperLink**: a long-running process that forwards task instructions + to clients (SuperNodes) and receives task results back. + +- ``ServerApp``: a short-lived process with project-specific code that + customizes all server-side aspects of federated learning systems + (client selection, client configuration, result aggregation). This is + what AI researchers and AI engineers write when they build Flower + apps. + +A Flower `client` consists of **SuperNode** and ``ClientApp``: + +- **SuperNode**: a long-running process that connects to the SuperLink, + asks for tasks, executes tasks (for example, "train this model on + your local data") and returns task results back to the SuperLink. + +- ``ClientApp``: a short-lived process with project-specific code that + customizes all client-side aspects of federated learning systems + (local model training and evaluation, pre- and post-processing). This + is what AI researchers and AI engineers write when they build Flower + apps. + +Why SuperNode and SuperLink? Well, in federated learning, the clients +are the actual stars of the show. They hold the training data and they +run the actual training. This is why Flower decided to name them +**SuperNode**. The **SuperLink** is then responsible for acting as the +`missing link` between all those SuperNodes. + +.. figure:: ./_static/flower-architecture-basic-architecture.svg + :align: center + :width: 600 + :alt: Basic Flower architecture + :class: no-scaled-link + + The basic Flower architecture for federated learning. + +In a Flower app project, users will typically develop the ``ServerApp`` +and the ``ClientApp``. All the network communication between `server` +and `clients` is taken care of by the SuperLink and SuperNodes. + +.. tip:: + + For more details, please refer to the |serverapp_link|_ and + |clientapp_link|_ documentation. 
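To make this split more concrete, the following is a minimal, illustrative sketch of both project-specific parts in Python. The ``TinyClient`` class and its toy update rule are hypothetical stand-ins for real training code, and a real project (for example, one generated with ``flwr new``) would typically keep the ``ClientApp`` and ``ServerApp`` in separate modules:

.. code-block:: python

    import numpy as np

    from flwr.client import ClientApp, NumPyClient
    from flwr.common import Context, ndarrays_to_parameters
    from flwr.server import ServerApp, ServerAppComponents, ServerConfig
    from flwr.server.strategy import FedAvg


    class TinyClient(NumPyClient):
        """Toy client that nudges a single-parameter model each round."""

        def fit(self, parameters, config):
            updated = [p + 0.1 for p in parameters]  # stand-in for real local training
            return updated, 1, {}

        def evaluate(self, parameters, config):
            return 0.0, 1, {}


    def client_fn(context: Context):
        # Project-specific client-side code, executed by a SuperNode
        return TinyClient().to_client()


    def server_fn(context: Context) -> ServerAppComponents:
        # Project-specific server-side code (strategy, rounds), run alongside the SuperLink
        strategy = FedAvg(initial_parameters=ndarrays_to_parameters([np.zeros(1)]))
        return ServerAppComponents(strategy=strategy, config=ServerConfig(num_rounds=3))


    # The short-lived processes: SuperNodes load the ClientApp, the server side loads the ServerApp
    app = ClientApp(client_fn=client_fn)
    server = ServerApp(server_fn=server_fn)

Note that the long-running SuperLink and SuperNodes do not need to change when this code changes; only the short-lived ``ServerApp`` and ``ClientApp`` parts do.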
+ +With *multi-run*, multiple ``ServerApp``\s and ``ClientApp``\s are now +capable of running on the same federation consisting of a single +long-running SuperLink and multiple long-running SuperNodes. This is +sometimes referred to as `multi-tenancy` or `multi-job`. + +As shown in the figure below, two projects, each consisting of a +``ServerApp`` and a ``ClientApp``, could share the same SuperLink and +SuperNodes. + +.. figure:: ./_static/flower-architecture-multi-run.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture + :class: no-scaled-link + + Multi-tenancy federated learning architecture with Flower + +To illustrate how multi-run works, consider one federated learning +training run where a ``ServerApp`` and a ``ClientApp`` are participating +in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if +it is selected to participate in the training run. + +In ``[run 1]`` below, all the SuperNodes are selected and therefore run +their corresponding ``ClientApp``\s: + +.. figure:: ./_static/flower-architecture-multi-run-1.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 1 + :class: no-scaled-link + + Run 1 in a multi-run federated learning architecture with Flower. + All SuperNodes participate in the training round. + +However, in ``[run 2]``, only the first and third SuperNodes are +selected to participate in the training: + +.. figure:: ./_static/flower-architecture-multi-run-2.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 2 + :class: no-scaled-link + + Run 2 in a multi-run federated learning architecture with Flower. + Only the first and third SuperNodes are selected to participate in the + training round. + +Therefore, with Flower multi-run, different projects (each consisting of +a ``ServerApp`` and ``ClientApp``) can run on different sets of clients. + +To help you start and manage all of the concurrently executing training +runs, Flower offers one additional long-running server-side service +called **SuperExec**. When you type ``flwr run`` to start a new training +run, the ``flwr`` CLI bundles your local project (mainly your +``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The +**SuperExec** will then take care of starting and managing your +``ServerApp``, which in turn selects SuperNodes to execute your +``ClientApp``. + +This architecture allows many users to (concurrently) run their projects +on the same federation, simply by typing ``flwr run`` on their local +developer machine. + +.. figure:: ./_static/flower-architecture-deployment-engine.svg + :align: center + :width: 800 + :alt: Flower Deployment Engine with SuperExec + :class: no-scaled-link + + The SuperExec service for managing concurrent training runs in + Flower. + +.. note:: + + This explanation covers the Flower Deployment Engine. An explanation + covering the Flower Simulation Engine will follow. + +.. important:: + + As we continue to enhance Flower at a rapid pace, we'll periodically + update this explainer document. Feel free to share any feedback with + us. + +.. |clientapp_link| replace:: + + ``ClientApp`` + +.. |serverapp_link| replace:: + + ``ServerApp`` + +.. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _serverapp_link: ref-api/flwr.server.ServerApp.html + +.. title:: Flower federated learning architecture + +.. 
meta:: + :description: Explore the federated learning architecture of the Flower framework, featuring multi-run, concurrent execution, and scalable, secure machine learning while preserving data privacy. diff --git a/doc/source/how-to-authenticate-supernodes.rst b/doc/source/how-to-authenticate-supernodes.rst index 73987261b29f..9b001531ee33 100644 --- a/doc/source/how-to-authenticate-supernodes.rst +++ b/doc/source/how-to-authenticate-supernodes.rst @@ -28,13 +28,13 @@ Use the following terminal command to start a Flower :code:`SuperNode` that has .. code-block:: bash flower-superlink - --ssl-ca-certfile certificates/ca.crt - --ssl-certfile certificates/server.pem + --ssl-ca-certfile certificates/ca.crt + --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key --auth-list-public-keys keys/client_public_keys.csv --auth-superlink-private-key keys/server_credentials --auth-superlink-public-key keys/server_credentials.pub - + Let's break down the authentication flags: 1. The first flag :code:`--auth-list-public-keys` expects a path to a CSV file storing all known node public keys. You need to store all known node public keys that are allowed to participate in a federation in one CSV file (:code:`.csv`). @@ -56,8 +56,8 @@ Similar to the long-running Flower server (:code:`SuperLink`), you can easily en Use the following terminal command to start an authenticated :code:`SuperNode`: .. code-block:: bash - - flower-client-app client:app + + flower-supernode --root-certificates certificates/ca.crt --superlink 127.0.0.1:9092 --auth-supernode-private-key keys/client_credentials diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst index 1828f4ed3258..fc8e89914ac2 100644 --- a/doc/source/how-to-enable-ssl-connections.rst +++ b/doc/source/how-to-enable-ssl-connections.rst @@ -4,7 +4,7 @@ Enable SSL connections This guide describes how to a SSL-enabled secure Flower server (:code:`SuperLink`) can be started and how a Flower client (:code:`SuperNode`) can establish a secure connections to it. -A complete code example demonstrating a secure connection can be found +A complete code example demonstrating a secure connection can be found `here `_. The code example comes with a :code:`README.md` file which explains how to start it. Although it is @@ -42,9 +42,9 @@ Use the following terminal command to start a sever (SuperLink) that uses the pr .. code-block:: bash - flower-superlink - --ssl-ca-certfile certificates/ca.crt - --ssl-certfile certificates/server.pem + flower-superlink + --ssl-ca-certfile certificates/ca.crt + --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key When providing certificates, the server expects a tuple of three certificates paths: CA certificate, server certificate and server private key. @@ -57,7 +57,7 @@ Use the following terminal command to start a client (SuperNode) that uses the p .. code-block:: bash - flower-client-app client:app + flower-supernode --root-certificates certificates/ca.crt --superlink 127.0.0.1:9092 diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index d773e6999245..a621377c8ce6 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -5,7 +5,7 @@ Install Flower Python version -------------- -Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. +Flower requires at least `Python 3.9 `_, but `Python 3.10 `_ or above is recommended. 
Install stable release diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst index e1e94f095b60..f378e92dbba4 100644 --- a/doc/source/how-to-upgrade-to-flower-next.rst +++ b/doc/source/how-to-upgrade-to-flower-next.rst @@ -88,7 +88,7 @@ Ensure you set the following version constraint in your ``pyproject.toml``: .. code-block:: toml [tool.poetry.dependencies] - python = "^3.8" + python = "^3.9" # Without simulation support flwr = ">=1.8,<2.0" diff --git a/doc/source/index.rst b/doc/source/index.rst index 2a34693f7b26..fe996db62ffb 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -102,7 +102,6 @@ Problem-oriented how-to guides show step-by-step how to achieve a specific goal. :caption: Legacy example guides example-pytorch-from-centralized-to-federated - example-jax-from-centralized-to-federated example-fedbn-pytorch-from-centralized-to-federated Explanations @@ -116,6 +115,7 @@ Understanding-oriented concept guides explain and discuss key topics and underly explanation-federated-evaluation explanation-differential-privacy + explanation-flower-architecture References ~~~~~~~~~~ @@ -174,7 +174,6 @@ The Flower community welcomes contributions. The following docs are intended to :maxdepth: 1 :caption: Contributor explanations - contributor-explanation-architecture contributor-explanation-public-and-private-apis .. toctree:: diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 531afb9ada52..27ea8ea1f94c 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -1,6 +1,140 @@ # Changelog -## Unreleased +## v1.11.1 (2024-09-11) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, `Javier`, `Robert Steiner`, `Yan Gao` + +### Improvements + +- **Implement** `keys/values/items` **methods for** `TypedDict` ([#4146](https://github.com/adap/flower/pull/4146)) + +- **Fix parsing of** `--executor-config` **if present** ([#4125](https://github.com/adap/flower/pull/4125)) + +- **Adjust framework name in templates docstrings** ([#4127](https://github.com/adap/flower/pull/4127)) + +- **Update** `flwr new` **Hugging Face template** ([#4169](https://github.com/adap/flower/pull/4169)) + +- **Fix** `flwr new` **FlowerTune template** ([#4123](https://github.com/adap/flower/pull/4123)) + +- **Add buffer time after** `ServerApp` **thread initialization** ([#4119](https://github.com/adap/flower/pull/4119)) + +- **Handle unsuitable resources for simulation** ([#4143](https://github.com/adap/flower/pull/4143)) + +- **Update example READMEs** ([#4117](https://github.com/adap/flower/pull/4117)) + +- **Update SuperNode authentication docs** ([#4160](https://github.com/adap/flower/pull/4160)) + +### Incompatible changes + +None + +## v1.11.0 (2024-08-30) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`, `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` + +### What's new? 
+ +- **Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** ([#4006](https://github.com/adap/flower/pull/4006), [#3945](https://github.com/adap/flower/pull/3945), [#3999](https://github.com/adap/flower/pull/3999), [#4027](https://github.com/adap/flower/pull/4027), [#3851](https://github.com/adap/flower/pull/3851), [#3946](https://github.com/adap/flower/pull/3946), [#4003](https://github.com/adap/flower/pull/4003), [#4029](https://github.com/adap/flower/pull/4029), [#3942](https://github.com/adap/flower/pull/3942), [#3957](https://github.com/adap/flower/pull/3957), [#4020](https://github.com/adap/flower/pull/4020), [#4044](https://github.com/adap/flower/pull/4044), [#3852](https://github.com/adap/flower/pull/3852), [#4019](https://github.com/adap/flower/pull/4019), [#4031](https://github.com/adap/flower/pull/4031), [#4036](https://github.com/adap/flower/pull/4036), [#4049](https://github.com/adap/flower/pull/4049), [#4017](https://github.com/adap/flower/pull/4017), [#3943](https://github.com/adap/flower/pull/3943), [#3944](https://github.com/adap/flower/pull/3944), [#4011](https://github.com/adap/flower/pull/4011), [#3619](https://github.com/adap/flower/pull/3619)) + + Dynamic code updates are here! `flwr run` can now ship and install the latest version of your `ServerApp` and `ClientApp` to an already-running federation (SuperLink and SuperNodes). + + How does it work? `flwr run` bundles your Flower app into a single FAB (Flower App Bundle) file. It then ships this FAB file, via the SuperExec, to both the SuperLink and those SuperNodes that need it. This allows you to keep SuperExec, SuperLink and SuperNodes running as permanent infrastructure, and then ship code updates (including completely new projects!) dynamically. + + `flwr run` is all you need. + +- **Introduce isolated** `ClientApp` **execution** ([#3970](https://github.com/adap/flower/pull/3970), [#3976](https://github.com/adap/flower/pull/3976), [#4002](https://github.com/adap/flower/pull/4002), [#4001](https://github.com/adap/flower/pull/4001), [#4034](https://github.com/adap/flower/pull/4034), [#4037](https://github.com/adap/flower/pull/4037), [#3977](https://github.com/adap/flower/pull/3977), [#4042](https://github.com/adap/flower/pull/4042), [#3978](https://github.com/adap/flower/pull/3978), [#4039](https://github.com/adap/flower/pull/4039), [#4033](https://github.com/adap/flower/pull/4033), [#3971](https://github.com/adap/flower/pull/3971), [#4035](https://github.com/adap/flower/pull/4035), [#3973](https://github.com/adap/flower/pull/3973), [#4032](https://github.com/adap/flower/pull/4032)) + + The SuperNode can now run your `ClientApp` in a fully isolated way. In an enterprise deployment, this allows you to set strict limits on what the `ClientApp` can and cannot do. + + `flower-supernode` supports three `--isolation` modes: + + - Unset: The SuperNode runs the `ClientApp` in the same process (as in previous versions of Flower). This is the default mode. + - `--isolation=subprocess`: The SuperNode starts a subprocess to run the `ClientApp`. + - `--isolation=process`: The SuperNode expects an externally-managed process to run the `ClientApp`. This external process is not managed by the SuperNode, so it has to be started beforehand and terminated manually. The common way to use this isolation mode is via the new `flwr/clientapp` Docker image. 
+ +- **Improve Docker support for enterprise deployments** ([#4050](https://github.com/adap/flower/pull/4050), [#4090](https://github.com/adap/flower/pull/4090), [#3784](https://github.com/adap/flower/pull/3784), [#3998](https://github.com/adap/flower/pull/3998), [#4094](https://github.com/adap/flower/pull/4094), [#3722](https://github.com/adap/flower/pull/3722)) + + Flower 1.11 ships many Docker improvements that are especially useful for enterprise deployments: + + - `flwr/supernode` comes with a new Alpine Docker image. + - `flwr/clientapp` is a new image to be used with the `--isolation=process` option. In this mode, SuperNode and `ClientApp` run in two different Docker containers. `flwr/supernode` (preferably the Alpine version) runs the long-running SuperNode with `--isolation=process`. `flwr/clientapp` runs the `ClientApp`. This is the recommended way to deploy Flower in enterprise settings. + - New all-in-one Docker Compose enables you to easily start a full Flower Deployment Engine on a single machine. + - Completely new Docker documentation: https://flower.ai/docs/framework/docker/index.html + +- **Improve SuperNode authentication** ([#4043](https://github.com/adap/flower/pull/4043), [#4047](https://github.com/adap/flower/pull/4047), [#4074](https://github.com/adap/flower/pull/4074)) + + SuperNode auth has been improved in several ways, including improved logging, improved testing, and improved error handling. + +- **Update** `flwr new` **templates** ([#3933](https://github.com/adap/flower/pull/3933), [#3894](https://github.com/adap/flower/pull/3894), [#3930](https://github.com/adap/flower/pull/3930), [#3931](https://github.com/adap/flower/pull/3931), [#3997](https://github.com/adap/flower/pull/3997), [#3979](https://github.com/adap/flower/pull/3979), [#3965](https://github.com/adap/flower/pull/3965), [#4013](https://github.com/adap/flower/pull/4013), [#4064](https://github.com/adap/flower/pull/4064)) + + All `flwr new` templates have been updated to show the latest recommended use of Flower APIs. + +- **Improve Simulation Engine** ([#4095](https://github.com/adap/flower/pull/4095), [#3913](https://github.com/adap/flower/pull/3913), [#4059](https://github.com/adap/flower/pull/4059), [#3954](https://github.com/adap/flower/pull/3954), [#4071](https://github.com/adap/flower/pull/4071), [#3985](https://github.com/adap/flower/pull/3985), [#3988](https://github.com/adap/flower/pull/3988)) + + The Flower Simulation Engine comes with several updates, including improved run config support, verbose logging, simulation backend configuration via `flwr run`, and more. + +- **Improve** `RecordSet` ([#4052](https://github.com/adap/flower/pull/4052), [#3218](https://github.com/adap/flower/pull/3218), [#4016](https://github.com/adap/flower/pull/4016)) + + `RecordSet` is the core object to exchange model parameters, configuration values and metrics between `ClientApp` and `ServerApp`. This release ships several smaller improvements to `RecordSet` and related `*Record` types. 
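  The snippet below is a small, illustrative sketch (not taken from the linked PRs) of how these record types can be grouped; it assumes the 1.11 attribute names `configs_records` and `metrics_records`, and the record names `"train-config"` and `"train-metrics"` are arbitrary examples:

  ```python
  from flwr.common import ConfigsRecord, MetricsRecord, RecordSet

  # Group project-specific values into a single RecordSet
  record_set = RecordSet()
  record_set.configs_records["train-config"] = ConfigsRecord({"lr": 0.01, "local-epochs": 2})
  record_set.metrics_records["train-metrics"] = MetricsRecord({"accuracy": 0.91, "num-examples": 1200})

  # Individual records behave like dictionaries
  print(record_set.configs_records["train-config"]["lr"])  # 0.01
  ```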
+ +- **Update documentation** ([#3972](https://github.com/adap/flower/pull/3972), [#3925](https://github.com/adap/flower/pull/3925), [#4061](https://github.com/adap/flower/pull/4061), [#3984](https://github.com/adap/flower/pull/3984), [#3917](https://github.com/adap/flower/pull/3917), [#3900](https://github.com/adap/flower/pull/3900), [#4066](https://github.com/adap/flower/pull/4066), [#3765](https://github.com/adap/flower/pull/3765), [#4021](https://github.com/adap/flower/pull/4021), [#3906](https://github.com/adap/flower/pull/3906), [#4063](https://github.com/adap/flower/pull/4063), [#4076](https://github.com/adap/flower/pull/4076), [#3920](https://github.com/adap/flower/pull/3920), [#3916](https://github.com/adap/flower/pull/3916)) + + Many parts of the documentation, including the main tutorial, have been migrated to show new Flower APIs and other new Flower features like the improved Docker support. + +- **Migrate code example to use new Flower APIs** ([#3758](https://github.com/adap/flower/pull/3758), [#3701](https://github.com/adap/flower/pull/3701), [#3919](https://github.com/adap/flower/pull/3919), [#3918](https://github.com/adap/flower/pull/3918), [#3934](https://github.com/adap/flower/pull/3934), [#3893](https://github.com/adap/flower/pull/3893), [#3833](https://github.com/adap/flower/pull/3833), [#3922](https://github.com/adap/flower/pull/3922), [#3846](https://github.com/adap/flower/pull/3846), [#3777](https://github.com/adap/flower/pull/3777), [#3874](https://github.com/adap/flower/pull/3874), [#3873](https://github.com/adap/flower/pull/3873), [#3935](https://github.com/adap/flower/pull/3935), [#3754](https://github.com/adap/flower/pull/3754), [#3980](https://github.com/adap/flower/pull/3980), [#4089](https://github.com/adap/flower/pull/4089), [#4046](https://github.com/adap/flower/pull/4046), [#3314](https://github.com/adap/flower/pull/3314), [#3316](https://github.com/adap/flower/pull/3316), [#3295](https://github.com/adap/flower/pull/3295), [#3313](https://github.com/adap/flower/pull/3313)) + + Many code examples have been migrated to use new Flower APIs. 
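  For orientation, the migrated examples generally follow the `ClientApp`/`ServerApp` pattern sketched below. This is a minimal, illustrative skeleton rather than the code of any particular example; the two app objects are the ones referenced from the `[tool.flwr.app.components]` table of a project's `pyproject.toml`, normally from separate `client_app.py` and `server_app.py` modules, and `num-server-rounds` is the config key used by the `flwr new` templates.

  ```python
  from flwr.client import ClientApp, NumPyClient
  from flwr.common import Context
  from flwr.server import ServerApp, ServerAppComponents, ServerConfig
  from flwr.server.strategy import FedAvg


  class FlowerClient(NumPyClient):
      """Placeholder client; implement fit() and evaluate() for your workload."""


  def client_fn(context: Context):
      # Build a Client instance from values in the node/run config
      return FlowerClient().to_client()


  def server_fn(context: Context):
      # Read the number of rounds from the run config (key assumed from the templates)
      num_rounds = context.run_config["num-server-rounds"]
      strategy = FedAvg()
      config = ServerConfig(num_rounds=num_rounds)
      return ServerAppComponents(strategy=strategy, config=config)


  # Normally defined in client_app.py and server_app.py respectively
  client_app = ClientApp(client_fn=client_fn)
  server_app = ServerApp(server_fn=server_fn)
  ```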
+ +- **Update Flower framework, framework internals and quality infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), [#4053](https://github.com/adap/flower/pull/4053), [#4098](https://github.com/adap/flower/pull/4098), [#4067](https://github.com/adap/flower/pull/4067), [#4105](https://github.com/adap/flower/pull/4105), [#4048](https://github.com/adap/flower/pull/4048), [#4107](https://github.com/adap/flower/pull/4107), [#4069](https://github.com/adap/flower/pull/4069), [#3915](https://github.com/adap/flower/pull/3915), [#4101](https://github.com/adap/flower/pull/4101), [#4108](https://github.com/adap/flower/pull/4108), [#3914](https://github.com/adap/flower/pull/3914), [#4068](https://github.com/adap/flower/pull/4068), [#4041](https://github.com/adap/flower/pull/4041), [#4040](https://github.com/adap/flower/pull/4040), [#3986](https://github.com/adap/flower/pull/3986), [#4026](https://github.com/adap/flower/pull/4026), [#3961](https://github.com/adap/flower/pull/3961), [#3975](https://github.com/adap/flower/pull/3975), [#3983](https://github.com/adap/flower/pull/3983), [#4091](https://github.com/adap/flower/pull/4091), [#3982](https://github.com/adap/flower/pull/3982), [#4079](https://github.com/adap/flower/pull/4079), [#4073](https://github.com/adap/flower/pull/4073), [#4060](https://github.com/adap/flower/pull/4060), [#4106](https://github.com/adap/flower/pull/4106), [#4080](https://github.com/adap/flower/pull/4080), [#3974](https://github.com/adap/flower/pull/3974), [#3996](https://github.com/adap/flower/pull/3996), [#3991](https://github.com/adap/flower/pull/3991), [#3981](https://github.com/adap/flower/pull/3981), [#4093](https://github.com/adap/flower/pull/4093), [#4100](https://github.com/adap/flower/pull/4100), [#3939](https://github.com/adap/flower/pull/3939), [#3955](https://github.com/adap/flower/pull/3955), [#3940](https://github.com/adap/flower/pull/3940), [#4038](https://github.com/adap/flower/pull/4038)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. + +### Deprecations + +- **Deprecate accessing `Context` via `Client.context`** ([#3797](https://github.com/adap/flower/pull/3797)) + + Now that both `client_fn` and `server_fn` receive a `Context` object, accessing `Context` via `Client.context` is deprecated. `Client.context` will be removed in a future release. If you need to access `Context` in your `Client` implementation, pass it manually when creating the `Client` instance in `client_fn`: + + ```python + def client_fn(context: Context) -> Client: + return FlowerClient(context).to_client() + ``` + +### Incompatible changes + +- **Update CLIs to accept an app directory instead of** `ClientApp` **and** `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), [#4077](https://github.com/adap/flower/pull/4077), [#3850](https://github.com/adap/flower/pull/3850)) + + The CLI commands `flower-supernode` and `flower-server-app` now accept an app directory as argument (instead of references to a `ClientApp` or `ServerApp`). An app directory is any directory containing a `pyproject.toml` file (with the appropriate Flower config fields set). The easiest way to generate a compatible project structure is to use `flwr new`. + +- **Disable** `flower-client-app` **CLI command** ([#4022](https://github.com/adap/flower/pull/4022)) + + `flower-client-app` has been disabled. Use `flower-supernode` instead. 
+ +- **Use spaces instead of commas for separating config args** ([#4000](https://github.com/adap/flower/pull/4000)) + + When passing configs (run config, node config) to Flower, you now need to separate key-value pairs using spaces instead of commas. For example: + + ```bash + flwr run . --run-config "learning-rate=0.01 num_rounds=10" # Works + ``` + + Previously, you could pass configs using commas, like this: + + ```bash + flwr run . --run-config "learning-rate=0.01,num_rounds=10" # Doesn't work + ``` + +- **Remove** `flwr example` **CLI command** ([#4084](https://github.com/adap/flower/pull/4084)) + + The experimental `flwr example` CLI command has been removed. Use `flwr new` to generate a project and then run it using `flwr run`. ## v1.10.0 (2024-07-24) diff --git a/doc/source/ref-faq.rst b/doc/source/ref-faq.rst index 26b7dca4a0a7..e3bd754d481c 100644 --- a/doc/source/ref-faq.rst +++ b/doc/source/ref-faq.rst @@ -25,6 +25,9 @@ This page collects answers to commonly asked questions about Federated Learning Yes, of course. A list of available examples using Flower within a blockchain environment is available here: + * `FLock: A Decentralised AI Training Platform `_. + * Contribute to on-chain training the model and earn rewards. + * Local blockchain with federated learning simulation. * `Flower meets Nevermined GitHub Repository `_. * `Flower meets Nevermined YouTube video `_. * `Flower meets KOSMoS `_. diff --git a/doc/source/tutorial-quickstart-fastai.rst b/doc/source/tutorial-quickstart-fastai.rst index 63f5ac176082..e42328e6f712 100644 --- a/doc/source/tutorial-quickstart-fastai.rst +++ b/doc/source/tutorial-quickstart-fastai.rst @@ -1,12 +1,113 @@ .. _quickstart-fastai: +################### + Quickstart fastai +################### -Quickstart fastai -================= +In this federated learning tutorial we will learn how to train a +SqueezeNet model on MNIST using Flower and fastai. It is recommended to +create a virtual environment and run everything within a +:doc:`virtualenv `. -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with FastAI to train a vision model on CIFAR-10. +Then, clone the code example directly from GitHub: -Let's build a federated learning system using fastai and Flower! +.. code:: shell -Please refer to the `full code example `_ to learn more. + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-fastai . \ + && rm -rf _tmp && cd quickstart-fastai + +This will create a new directory called `quickstart-fastai` containing +the following files: + +.. code:: shell + + quickstart-fastai + ├── fastai_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +Next, activate your environment, then run: + +.. code:: shell + + # Navigate to the example directory + $ cd path/to/quickstart-fastai + + # Install project and dependencies + $ pip install -e . + +This example by default runs the Flower Simulation Engine, creating a +federation of 10 nodes using `FedAvg +`_ +as the aggregation strategy. The dataset will be partitioned using +Flower Dataset's `IidPartitioner +`_. +Let's run the project: + +.. code:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 143.02s + INFO : History (loss, distributed): + INFO : round 1: 2.699497365951538 + INFO : round 2: 0.9549586296081543 + INFO : round 3: 0.6627192616462707 + INFO : History (metrics, distributed, evaluate): + INFO : {'accuracy': [(1, 0.09766666889190674), + INFO : (2, 0.6948333323001862), + INFO : (3, 0.7721666693687439)]} + INFO : + +You can also override the parameters defined in the +``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: + +.. code:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 + +.. note:: + + Check the `source code + `_ + of this tutorial in ``examples/quickstart-fasai`` in the Flower + GitHub repository. diff --git a/doc/source/tutorial-quickstart-huggingface.rst b/doc/source/tutorial-quickstart-huggingface.rst index 7d8128230901..e5caa3b19dd6 100644 --- a/doc/source/tutorial-quickstart-huggingface.rst +++ b/doc/source/tutorial-quickstart-huggingface.rst @@ -1,229 +1,437 @@ .. _quickstart-huggingface: +########################### + Quickstart 🤗 Transformers +########################### + +In this federated learning tutorial we will learn how to train a large +language model (LLM) on the `IMDB +`_ dataset using +Flower and the 🤗 Hugging Face Transformers library. It is recommended to +create a virtual environment and run everything within a +:doc:`virtualenv `. + +Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face +project. It will generate all the files needed to run, by default with +the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ +The dataset will be partitioned using |flowerdatasets|_'s +|iidpartitioner|_. + +Now that we have a rough idea of what this example is about, let's get +started. First, install Flower in your new environment: + +.. code:: shell + + # In a new Python environment + $ pip install flwr + +Then, run the command below. You will be prompted to select one of the +available templates (choose ``HuggingFace``), give a name to your +project, and type in your developer name: + +.. code:: shell + + $ flwr new + +After running it you'll notice a new directory with your project name +has been created. It should have the following structure: + +.. 
code:: shell + + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +If you haven't yet installed the project and its dependencies, you can +do so by: + +.. code:: shell + + # From the directory where your pyproject.toml is + $ pip install -e . + +To run the project, do: + +.. code:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 10) + INFO : aggregate_fit: received 2 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 249.11s + INFO : History (loss, distributed): + INFO : round 1: 0.02111011856794357 + INFO : round 2: 0.019722302150726317 + INFO : round 3: 0.018227258533239362 + INFO : + +You can also run the project with GPU as follows: + +.. code:: shell + + # Run with default arguments + $ flwr run . localhost-gpu + +This will use the default arguments where each ``ClientApp`` will use 2 +CPUs and at most 4 ``ClientApp``\s will run in a given GPU. + +You can also override the parameters defined in the +``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: + +.. code:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 fraction-fit=0.2" + +What follows is an explanation of each component in the project you just +created: dataset partition, the model, defining the ``ClientApp`` and +defining the ``ServerApp``. + +********** + The Data +********** + +This tutorial uses |flowerdatasets|_ to easily download and partition +the `IMDB `_ dataset. +In this example you'll make use of the |iidpartitioner|_ to generate +``num_partitions`` partitions. You can choose |otherpartitioners|_ +available in Flower Datasets. To tokenize the text, we will also load +the tokenizer from the pre-trained Transformer model that we'll use +during training - more on that in the next section. Each ``ClientApp`` +will call this function to create dataloaders with the data that +correspond to their data partition. + +.. 
code:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="stanfordnlp/imdb", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + tokenizer = AutoTokenizer.from_pretrained(model_name) + + def tokenize_function(examples): + return tokenizer( + examples["text"], truncation=True, add_special_tokens=True, max_length=512 + ) + + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") + + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + trainloader = DataLoader( + partition_train_test["train"], + shuffle=True, + batch_size=32, + collate_fn=data_collator, + ) + + testloader = DataLoader( + partition_train_test["test"], batch_size=32, collate_fn=data_collator + ) + +*********** + The Model +*********** + +We will leverage 🤗 Hugging Face to federate the training of language +models over multiple clients using Flower. More specifically, we will +fine-tune a pre-trained Transformer model (|berttiny|_) for sequence +classification over the dataset of IMDB ratings. The end goal is to +detect if a movie rating is positive or negative. If you have access to +larger GPUs, feel free to use larger models! + +.. code:: python + + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + +Note that here, ``model_name`` is a string that will be loaded from the +``Context`` in the ClientApp and ServerApp. + +In addition to loading the pretrained model weights and architecture, we +also include two utility functions to perform both training (i.e. +``train()``) and evaluation (i.e. ``test()``) using the above model. +These functions should look fairly familiar if you have some prior +experience with PyTorch. Note these functions do not have anything +specific to Flower. That being said, the training function will normally +be called, as we'll see later, from a Flower client passing its own +data. In summary, your clients can use standard training/testing +functions to perform local training or evaluation: + +.. code:: python + + def train(net, trainloader, epochs, device): + optimizer = AdamW(net.parameters(), lr=5e-5) + net.train() + for _ in range(epochs): + for batch in trainloader: + batch = {k: v.to(device) for k, v in batch.items()} + outputs = net(**batch) + loss = outputs.loss + loss.backward() + optimizer.step() + optimizer.zero_grad() + + + def test(net, testloader, device): + metric = load_metric("accuracy") + loss = 0 + net.eval() + for batch in testloader: + batch = {k: v.to(device) for k, v in batch.items()} + with torch.no_grad(): + outputs = net(**batch) + logits = outputs.logits + loss += outputs.loss.item() + predictions = torch.argmax(logits, dim=-1) + metric.add_batch(predictions=predictions, references=batch["labels"]) + loss /= len(testloader.dataset) + accuracy = metric.compute()["accuracy"] + return loss, accuracy + +*************** + The ClientApp +*************** + +The main changes we have to make to use 🤗 Hugging Face with Flower will +be found in the ``get_weights()`` and ``set_weights()`` functions. 
Under +the hood, the ``transformers`` library uses PyTorch, which means we can +reuse the ``get_weights()`` and ``set_weights()`` code that we defined +in the :doc:`Quickstart PyTorch ` tutorial. +As a reminder, in ``get_weights()``, PyTorch model parameters are +extracted and represented as a list of NumPy arrays. The +``set_weights()`` function does the opposite: given a list of NumPy +arrays it applies them to an existing PyTorch model. Doing this is +fairly easy in PyTorch. + +.. note:: + + The specific implementation of ``get_weights()`` and + ``set_weights()`` depends on the type of models you use. The ones + shown below work for a wide range of PyTorch models but you might + need to adjust them if you have more exotic model architectures. + +.. code:: python + + def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + +The rest of the functionality is directly inspired by the centralized +case. The ``fit()`` method in the client trains the model using the +local dataset. Similarly, the ``evaluate()`` method is used to evaluate +the model received on a held-out validation set that the client might +have: + +.. code:: python + + class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, testloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.testloader = testloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + set_weights(self.net, parameters) + train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) + return get_weights(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return float(loss), len(self.testloader), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` +defined above by means of a ``client_fn()`` callback. Note that the +`context` enables you to get access to hyperparameters defined in your +``pyproject.toml`` to configure the run. In this tutorial we access the +``local-epochs`` setting to control the number of epochs a ``ClientApp`` +will perform when running the ``fit()`` method. You could define +additional hyperparameters in ``pyproject.toml`` and access them here. + +.. code:: python -Quickstart 🤗 Transformers -========================== + def client_fn(context: Context): -.. meta:: - :description: Check out this Federating Learning quickstart tutorial for using Flower with HuggingFace Transformers in order to fine-tune an LLM. - -Let's build a federated learning system using Hugging Face Transformers and Flower! - -We will leverage Hugging Face to federate the training of language models over multiple clients using Flower. -More specifically, we will fine-tune a pre-trained Transformer model (distilBERT) -for sequence classification over a dataset of IMDB ratings. -The end goal is to detect if a movie rating is positive or negative. - -Dependencies ------------ - -To follow along this tutorial you will need to install the following packages: -:code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, and :code:`transformers`. 
-This can be done using :code:`pip`: - -.. code-block:: shell - - $ pip install datasets evaluate flwr torch transformers - - -Standard Hugging Face workflow ------------------------------- - -Handling the data -^^^^^^^^^^^^^^^^^ - -To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` library. -We then need to tokenize the data and create :code:`PyTorch` dataloaders, -this is all done in the :code:`load_data` function: - -.. code-block:: python - - import random - import torch - from datasets import load_dataset - from torch.utils.data import DataLoader - from transformers import AutoTokenizer, DataCollatorWithPadding - - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - CHECKPOINT = "distilbert-base-uncased" - - def load_data(): - """Load IMDB data (training and eval)""" - raw_datasets = load_dataset("imdb") - raw_datasets = raw_datasets.shuffle(seed=42) - # remove unnecessary data split - del raw_datasets["unsupervised"] - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) - def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) - # We will take a small sample in order to reduce the compute time, this is optional - train_population = random.sample(range(len(raw_datasets["train"])), 100) - test_population = random.sample(range(len(raw_datasets["test"])), 100) - tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) - tokenized_datasets["train"] = tokenized_datasets["train"].select(train_population) - tokenized_datasets["test"] = tokenized_datasets["test"].select(test_population) - tokenized_datasets = tokenized_datasets.remove_columns("text") - tokenized_datasets = tokenized_datasets.rename_column("label", "labels") - data_collator = DataCollatorWithPadding(tokenizer=tokenizer) - trainloader = DataLoader( - tokenized_datasets["train"], - shuffle=True, - batch_size=32, - collate_fn=data_collator, - ) - testloader = DataLoader( - tokenized_datasets["test"], batch_size=32, collate_fn=data_collator - ) - return trainloader, testloader - - -Training and testing the model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Once we have a way of creating our trainloader and testloader, -we can take care of the training and testing. -This is very similar to any :code:`PyTorch` training or testing loop: - -.. code-block:: python - - from evaluate import load as load_metric - from transformers import AdamW - - def train(net, trainloader, epochs): - optimizer = AdamW(net.parameters(), lr=5e-5) - net.train() - for _ in range(epochs): - for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} - outputs = net(**batch) - loss = outputs.loss - loss.backward() - optimizer.step() - optimizer.zero_grad() - def test(net, testloader): - metric = load_metric("accuracy") - loss = 0 - net.eval() - for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} - with torch.no_grad(): - outputs = net(**batch) - logits = outputs.logits - loss += outputs.loss.item() - predictions = torch.argmax(logits, dim=-1) - metric.add_batch(predictions=predictions, references=batch["labels"]) - loss /= len(testloader.dataset) - accuracy = metric.compute()["accuracy"] - return loss, accuracy - - -Creating the model itself -^^^^^^^^^^^^^^^^^^^^^^^^^ - -To create the model itself, -we will just load the pre-trained distillBERT model using Hugging Face’s :code:`AutoModelForSequenceClassification` : - -.. 
code-block:: python - - from transformers import AutoModelForSequenceClassification - - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) - - -Federating the example ---------------------- - -Creating the IMDBClient -^^^^^^^^^^^^^^^^^^^^^^^ - -To federate our example to multiple clients, -we first need to write our Flower client class (inheriting from :code:`flwr.client.NumPyClient`). -This is very easy, as our model is a standard :code:`PyTorch` model: - -.. code-block:: python - - from collections import OrderedDict - import flwr as fl - - class IMDBClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - def fit(self, parameters, config): - self.set_parameters(parameters) - print("Training Started...") - train(net, trainloader, epochs=1) - print("Training Finished.") - return self.get_parameters(config={}), len(trainloader), {} - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return float(loss), len(testloader), {"accuracy": float(accuracy)} - - -The :code:`get_parameters` function lets the server get the client's parameters. -Inversely, the :code:`set_parameters` function allows the server to send its parameters to the client. -Finally, the :code:`fit` function trains the model locally for the client, -and the :code:`evaluate` function tests the model locally and returns the relevant metrics. - -Starting the server -^^^^^^^^^^^^^^^^^^^ + # Get this client's dataset partition + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + model_name = context.run_config["model-name"] + trainloader, valloader = load_data(partition_id, num_partitions, model_name) + + # Load model + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() + + # Flower ClientApp + app = ClientApp(client_fn) + +*************** + The ServerApp +*************** + +To construct a ``ServerApp`` we define a ``server_fn()`` callback with +an identical signature to that of ``client_fn()`` but the return type is +|serverappcomponents|_ as opposed to a |client|_. In this example we use +the `FedAvg` strategy. To it we pass a randomly initialized model that +will serve as the global model to federate. Note that the value of +``fraction_fit`` is read from the run config. You can find the default +value defined in the ``pyproject.toml``. + +.. 
code:: python + + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize global model + model_name = context.run_config["model-name"] + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + weights = get_weights(net) + initial_parameters = ndarrays_to_parameters(weights) + + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + + # Create ServerApp + app = ServerApp(server_fn=server_fn) + +Congratulations! You've successfully built and run your first federated +learning system for an LLM. + +.. note:: + + Check the source code of the extended version of this tutorial in + |quickstart_hf_link|_ in the Flower GitHub repository. For a + comprehensive example of a federated fine-tuning of an LLM with + Flower, refer to the |flowertune|_ example in the Flower GitHub + repository. -Now that we have a way to instantiate clients, we need to create our server in order to aggregate the results. -Using Flower, this can be done very easily by first choosing a strategy (here, we are using :code:`FedAvg`, -which will define the global weights as the average of all the clients' weights at each round) -and then using the :code:`flwr.server.start_server` function: - -.. code-block:: python - - def weighted_average(metrics): - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - losses = [num_examples * m["loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - return {"accuracy": sum(accuracies) / sum(examples), "loss": sum(losses) / sum(examples)} - - # Define strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=weighted_average, - ) - - # Start server - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) +.. |quickstart_hf_link| replace:: + ``examples/quickstart-huggingface`` -The :code:`weighted_average` function is there to provide a way to aggregate the metrics distributed amongst -the clients (basically this allows us to display a nice average accuracy and loss for every round). +.. |fedavg| replace:: -Putting everything together ---------------------------- + ``FedAvg`` -We can now start client instances using: +.. |iidpartitioner| replace:: -.. code-block:: python + ``IidPartitioner`` - fl.client.start_client( - server_address="127.0.0.1:8080", - client=IMDBClient().to_client() - ) +.. |otherpartitioners| replace:: + other partitioners -And they will be able to connect to the server and start the federated training. +.. |berttiny| replace:: -If you want to check out everything put together, -you should check out the `full code example `_ . + ``bert-tiny`` -Of course, this is a very basic example, and a lot can be added or modified, -it was just to showcase how simply we could federate a Hugging Face workflow using Flower. +.. |serverappcomponents| replace:: -Note that in this example we used :code:`PyTorch`, but we could have very well used :code:`TensorFlow`. + ``ServerAppComponents`` + +.. |client| replace:: + + ``Client`` + +.. 
|flowerdatasets| replace:: + + Flower Datasets + +.. |flowertune| replace:: + + FlowerTune LLM + +.. _berttiny: https://huggingface.co/prajjwal1/bert-tiny + +.. _client: ref-api/flwr.client.Client.html#client + +.. _fedavg: ref-api/flwr.server.strategy.FedAvg.html#flwr.server.strategy.FedAvg + +.. _flowerdatasets: https://flower.ai/docs/datasets/ + +.. _flowertune: https://github.com/adap/flower/tree/main/examples/flowertune-llm + +.. _iidpartitioner: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner + +.. _otherpartitioners: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html + +.. _quickstart_hf_link: https://github.com/adap/flower/tree/main/examples/quickstart-huggingface + +.. _serverappcomponents: ref-api/flwr.server.ServerAppComponents.html#serverappcomponents + +.. meta:: + :description: Check out this Federating Learning quickstart tutorial for using Flower with 🤗 HuggingFace Transformers in order to fine-tune an LLM. diff --git a/doc/source/tutorial-quickstart-mlx.rst b/doc/source/tutorial-quickstart-mlx.rst index 0999bf44d3b7..675a08502d26 100644 --- a/doc/source/tutorial-quickstart-mlx.rst +++ b/doc/source/tutorial-quickstart-mlx.rst @@ -109,7 +109,7 @@ You can also override the parameters defined in .. code:: shell # Override some arguments - $ flwr run . --run-config num-server-rounds=5,lr=0.05 + $ flwr run . --run-config "num-server-rounds=5 lr=0.05" What follows is an explanation of each component in the project you just created: dataset partition, the model, defining the ``ClientApp`` and diff --git a/doc/source/tutorial-quickstart-pytorch-lightning.rst b/doc/source/tutorial-quickstart-pytorch-lightning.rst index acfbecf41260..7c74c9a1682f 100644 --- a/doc/source/tutorial-quickstart-pytorch-lightning.rst +++ b/doc/source/tutorial-quickstart-pytorch-lightning.rst @@ -1,12 +1,119 @@ .. _quickstart-pytorch-lightning: +############################## + Quickstart PyTorch Lightning +############################## -Quickstart PyTorch Lightning -============================ +In this federated learning tutorial we will learn how to train an +AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is +recommended to create a virtual environment and run everything within a +:doc:`virtualenv `. -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch Lightning to train an Auto Encoder model on MNIST. +Then, clone the code example directly from GitHub: -Let's build a horizontal federated learning system using PyTorch Lightning and Flower! +.. code:: shell -Please refer to the `full code example `_ to learn more. + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pytorch-lightning . \ + && rm -rf _tmp && cd quickstart-pytorch-lightning + +This will create a new directory called `quickstart-pytorch-lightning` +containing the following files: + +.. code:: shell + + quickstart-pytorch-lightning + ├── pytorchlightning_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +Next, activate your environment, then run: + +.. code:: shell + + # Navigate to the example directory + $ cd path/to/quickstart-pytorch-lightning + + # Install project and dependencies + $ pip install -e . 
+ +By default, Flower Simulation Engine will be started and it will create +a federation of 4 nodes using `FedAvg +`_ +as the aggregation strategy. The dataset will be partitioned using +Flower Dataset's `IidPartitioner +`_. +To run the project, do: + +.. code:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 136.92s + INFO : History (loss, distributed): + INFO : round 1: 0.04982871934771538 + INFO : round 2: 0.046457378193736076 + INFO : round 3: 0.04506748169660568 + INFO : + +Each simulated `ClientApp` (two per round) will also log a summary of +their local training process. Expect this output to be similar to: + +.. code:: shell + + # The left part indicates the process ID running the `ClientApp` + (ClientAppActor pid=38155) ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ + (ClientAppActor pid=38155) ┃ Test metric ┃ DataLoader 0 ┃ + (ClientAppActor pid=38155) ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ + (ClientAppActor pid=38155) │ test_loss │ 0.045175597071647644 │ + (ClientAppActor pid=38155) └───────────────────────────┴───────────────────────────┘ + +You can also override the parameters defined in the +``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: + +.. code:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 + +.. note:: + + Check the `source code + `_ + of this tutorial in ``examples/quickstart-pytorch-lightning`` in the + Flower GitHub repository. diff --git a/doc/source/tutorial-quickstart-pytorch.rst b/doc/source/tutorial-quickstart-pytorch.rst index 4515e8d0eeb5..d00b9efbe16b 100644 --- a/doc/source/tutorial-quickstart-pytorch.rst +++ b/doc/source/tutorial-quickstart-pytorch.rst @@ -108,7 +108,7 @@ You can also override the parameters defined in the .. code:: shell # Override some arguments - $ flwr run . --run-config num-server-rounds=5,local-epochs=3 + $ flwr run . 
--run-config "num-server-rounds=5 local-epochs=3" What follows is an explanation of each component in the project you just created: dataset partition, the model, defining the ``ClientApp`` and diff --git a/doc/source/tutorial-quickstart-tensorflow.rst b/doc/source/tutorial-quickstart-tensorflow.rst index bd63eb461d21..ffcd9efeb9bc 100644 --- a/doc/source/tutorial-quickstart-tensorflow.rst +++ b/doc/source/tutorial-quickstart-tensorflow.rst @@ -1,171 +1,307 @@ .. _quickstart-tensorflow: +####################### + Quickstart TensorFlow +####################### + +In this tutorial we will learn how to train a Convolutional Neural +Network on CIFAR-10 using the Flower framework and TensorFlow. First of +all, it is recommended to create a virtual environment and run +everything within a :doc:`virtualenv +`. + +Let's use `flwr new` to create a complete Flower+TensorFlow project. It +will generate all the files needed to run, by default with the Flower +Simulation Engine, a federation of 10 nodes using `FedAvg +`_. +The dataset will be partitioned using Flower Dataset's `IidPartitioner +`_. + +Now that we have a rough idea of what this example is about, let's get +started. First, install Flower in your new environment: + +.. code:: shell + + # In a new Python environment + $ pip install flwr + +Then, run the command below. You will be prompted to select one of the +available templates (choose ``TensorFlow``), give a name to your +project, and type in your developer name: + +.. code:: shell + + $ flwr new + +After running it you'll notice a new directory with your project name +has been created. It should have the following structure: + +.. code:: shell + + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +If you haven't yet installed the project and its dependencies, you can +do so by: + +.. code:: shell + + # From the directory where your pyproject.toml is + $ pip install -e . + +To run the project, do: + +.. code:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 31.31s + INFO : History (loss, distributed): + INFO : round 1: 1.9066195368766785 + INFO : round 2: 1.657227087020874 + INFO : round 3: 1.559039831161499 + INFO : + +You can also override the parameters defined in the +``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: + +.. code:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 batch-size=16" + +********** + The Data +********** + +This tutorial uses `Flower Datasets `_ +to easily download and partition the `CIFAR-10` dataset. In this example +you'll make use of the `IidPartitioner +`_ +to generate `num_partitions` partitions. You can choose `other +partitioners +`_ +available in Flower Datasets. Each ``ClientApp`` will call this function +to create the ``NumPy`` arrays that correspond to their data partition. + +.. code:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id, "train") + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] + +*********** + The Model +*********** + +Next, we need a model. We defined a simple Convolutional Neural Network +(CNN), but feel free to replace it with a more sophisticated model if +you'd like: + +.. 
code:: python + + def load_model(learning_rate: float = 0.001): + # Define a simple CNN for CIFAR-10 and set Adam optimizer + model = keras.Sequential( + [ + keras.Input(shape=(32, 32, 3)), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(10, activation="softmax"), + ] + ) + model.compile( + "adam", + loss="sparse_categorical_crossentropy", + metrics=["accuracy"], + ) + return model + +*************** + The ClientApp +*************** + +With `TensorFlow`, we can use the built-in ``get_weights()`` and +``set_weights()`` functions, which simplifies the implementation with +`Flower`. The rest of the functionality in the ClientApp is directly +inspired by the centralized case. The ``fit()`` method in the client +trains the model using the local dataset. Similarly, the ``evaluate()`` +method is used to evaluate the model received on a held-out validation +set that the client might have: + +.. code:: python + + class FlowerClient(NumPyClient): + def __init__(self, model, data, epochs, batch_size, verbose): + self.model = model + self.x_train, self.y_train, self.x_test, self.y_test = data + self.epochs = epochs + self.batch_size = batch_size + self.verbose = verbose + + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, + self.y_train, + epochs=self.epochs, + batch_size=self.batch_size, + verbose=self.verbose, + ) + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0) + return loss, len(self.x_test), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` +defined above by means of a ``client_fn()`` callback. Note that the +`context` enables you to get access to hyperparameters defined in your +``pyproject.toml`` to configure the run. For example, in this tutorial +we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method, in +addition to `batch-size`. You could define additional hyperparameters in +``pyproject.toml`` and access them here. + +.. code:: python + + def client_fn(context: Context): + # Load model and data + net = load_model() + + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + epochs = context.run_config["local-epochs"] + batch_size = context.run_config["batch-size"] + verbose = context.run_config.get("verbose") + + # Return Client instance + return FlowerClient( + net, data, epochs, batch_size, verbose + ).to_client() + + + # Flower ClientApp + app = ClientApp(client_fn=client_fn) + +*************** + The ServerApp +*************** + +To construct a ``ServerApp`` we define a ``server_fn()`` callback with +an identical signature to that of ``client_fn()`` but the return type is +`ServerAppComponents +`_ +as opposed to a `Client +`_. +In this example we use the `FedAvg`. To it we pass a randomly +initialized model that will serve as the global model to federate. + +.. 
code:: python + + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Get parameters to initialize global model + parameters = ndarrays_to_parameters(load_model().get_weights()) + + # Define strategy + strategy = strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + # Create ServerApp + app = ServerApp(server_fn=server_fn) -Quickstart TensorFlow -===================== - -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a MobilNetV2 model on CIFAR-10. - -.. youtube:: FGTc2TQq7VM - :width: 100% - -Let's build a federated learning system in less than 20 lines of code! - -Before Flower can be imported we have to install it: - -.. code-block:: shell - - $ pip install flwr - -Since we want to use the Keras API of TensorFlow (TF), we have to install TF as well: - -.. code-block:: shell - - $ pip install tensorflow - - -Flower Client -------------- - -Next, in a file called :code:`client.py`, import Flower and TensorFlow: - -.. code-block:: python - - import flwr as fl - import tensorflow as tf - -We use the Keras utilities of TF to load CIFAR10, a popular colored image classification -dataset for machine learning. The call to -:code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches it locally, -and then returns the entire training and test set as NumPy ndarrays. - -.. code-block:: python - - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - -Next, we need a model. For the purpose of this tutorial, we use MobilNetV2 with 10 output classes: - -.. code-block:: python - - model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to train the neural network we defined earlier). - -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses Keras. -The :code:`NumPyClient` interface defines three methods which can be -implemented in the following way: - -.. code-block:: python - - class CifarClient(fl.client.NumPyClient): - def get_parameters(self, config): - return model.get_weights() - - def fit(self, parameters, config): - model.set_weights(parameters) - model.fit(x_train, y_train, epochs=1, batch_size=32, steps_per_epoch=3) - return model.get_weights(), len(x_train), {} - - def evaluate(self, parameters, config): - model.set_weights(parameters) - loss, accuracy = model.evaluate(x_test, y_test) - return loss, len(x_test), {"accuracy": float(accuracy)} - - -We can now create an instance of our class :code:`CifarClient` and add one line -to actually run this client: - -.. code-block:: python - - fl.client.start_client(server_address="[::]:8080", client=CifarClient().to_client()) - - -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()`. 
If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use -:code:`"[::]:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we point the client at. - - -Flower Server -------------- - -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: - -.. code-block:: python - - import flwr as fl - - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) - - -Train the model, federated! ---------------------------- - -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: - -.. code-block:: shell - - $ python server.py - -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +Congratulations! You've successfully built and run your first federated +learning system. -.. code-block:: shell +.. note:: - $ python client.py + Check the source code of the extended version of this tutorial in + |quickstart_tf_link|_ in the Flower GitHub repository. -Open another terminal and start the second client: +.. |quickstart_tf_link| replace:: -.. code-block:: shell + :code:`examples/quickstart-tensorflow` - $ python client.py +.. _quickstart_tf_link: https://github.com/adap/flower/blob/main/examples/quickstart-tensorflow -Each client will have its own dataset. +**************** + Video tutorial +**************** -You should now see how the training does in the very first terminal (the one -that started the server): +.. note:: -.. code-block:: shell + The video shown below shows how to setup a TensorFlow + Flower + project using our previously recommended APIs. 
A new video tutorial + will be released that shows the new APIs (as the content above does) - INFO flower 2021-02-25 14:15:46,741 | app.py:76 | Flower server running (insecure, 3 rounds) - INFO flower 2021-02-25 14:15:46,742 | server.py:72 | Getting initial parameters - INFO flower 2021-02-25 14:16:01,770 | server.py:74 | Evaluating initial parameters - INFO flower 2021-02-25 14:16:01,770 | server.py:87 | [TIME] FL starting - DEBUG flower 2021-02-25 14:16:12,341 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:21:17,235 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:21:17,512 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:21:29,628 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:21:29,696 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:25:59,917 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:26:00,227 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:26:11,457 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:26:11,530 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:30:43,389 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:30:43,630 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:30:53,384 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:30:53,384 | server.py:122 | [TIME] FL finished in 891.6143046000007 - INFO flower 2021-02-25 14:30:53,385 | app.py:109 | app_fit: losses_distributed [(1, 2.3196680545806885), (2, 2.3202896118164062), (3, 2.1818180084228516)] - INFO flower 2021-02-25 14:30:53,385 | app.py:110 | app_fit: accuracies_distributed [] - INFO flower 2021-02-25 14:30:53,385 | app.py:111 | app_fit: losses_centralized [] - INFO flower 2021-02-25 14:30:53,385 | app.py:112 | app_fit: accuracies_centralized [] - DEBUG flower 2021-02-25 14:30:53,442 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:31:02,848 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:31:02,848 | app.py:121 | app_evaluate: federated loss: 2.1818180084228516 - INFO flower 2021-02-25 14:31:02,848 | app.py:125 | app_evaluate: results [('ipv4:127.0.0.1:57158', EvaluateRes(loss=2.1818180084228516, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.21610000729560852})), ('ipv4:127.0.0.1:57160', EvaluateRes(loss=2.1818180084228516, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.21610000729560852}))] - INFO flower 2021-02-25 14:31:02,848 | app.py:127 | app_evaluate: failures [] flower 2020-07-15 10:07:56,396 | app.py:77 | app_evaluate: failures [] +.. meta:: + :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a CNN model on CIFAR-10. -Congratulations! You've successfully built and run your first federated -learning system. The full `source code `_ for this can be found in -:code:`examples/quickstart-tensorflow/client.py`. +.. 
youtube:: FGTc2TQq7VM + :width: 100% diff --git a/e2e/strategies/pyproject.toml b/e2e/strategies/pyproject.toml index 5cc74b20fa24..3ad62ec836a7 100644 --- a/e2e/strategies/pyproject.toml +++ b/e2e/strategies/pyproject.toml @@ -9,7 +9,7 @@ description = "Keras Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { path = "../../", develop = true, extras = ["simulation"] } tensorflow-cpu = "^2.9.1, !=2.11.1" tensorflow-io-gcs-filesystem = "<0.35.0" diff --git a/e2e/test_superlink.sh b/e2e/test_superlink.sh index 684f386bd388..2016f6da1933 100755 --- a/e2e/test_superlink.sh +++ b/e2e/test_superlink.sh @@ -2,7 +2,7 @@ set -e case "$1" in - e2e-bare-https) + e2e-bare-https | e2e-bare-auth) ./generate.sh server_arg="--ssl-ca-certfile certificates/ca.crt --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key" client_arg="--root-certificates certificates/ca.crt" @@ -37,14 +37,11 @@ case "$2" in client_auth_2="" ;; client-auth) - ./generate.sh rest_arg_superlink="" rest_arg_supernode="" server_address="127.0.0.1:9092" server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" - server_arg="--ssl-ca-certfile certificates/ca.crt --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key" - client_arg="--root-certificates certificates/ca.crt" server_auth="--auth-list-public-keys keys/client_public_keys.csv --auth-superlink-private-key keys/server_credentials --auth-superlink-public-key keys/server_credentials.pub" client_auth_1="--auth-supernode-private-key keys/client_credentials_1 --auth-supernode-public-key keys/client_credentials_1.pub" client_auth_2="--auth-supernode-private-key keys/client_credentials_2 --auth-supernode-public-key keys/client_credentials_2.pub" diff --git a/examples/advanced-pytorch/.gitignore b/examples/advanced-pytorch/.gitignore new file mode 100644 index 000000000000..014ee796bf45 --- /dev/null +++ b/examples/advanced-pytorch/.gitignore @@ -0,0 +1,3 @@ +__pycache__/ +outputs/ +wandb/ diff --git a/examples/advanced-pytorch/README.md b/examples/advanced-pytorch/README.md index ac0737673407..1771173c3925 100644 --- a/examples/advanced-pytorch/README.md +++ b/examples/advanced-pytorch/README.md @@ -1,77 +1,90 @@ --- -tags: [advanced, vision, fds] -dataset: [CIFAR-10] +tags: [advanced, vision, fds, wandb] +dataset: [Fashion-MNIST] framework: [torch, torchvision] --- -# Advanced Flower Example (PyTorch) +# Federated Learning with PyTorch and Flower (Advanced Example) -This example demonstrates an advanced federated learning setup using Flower with PyTorch. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) and it differs from the quickstart example in the following ways: +> \[!TIP\] +> This example shows intermediate and advanced functionality of Flower. It you are new to Flower, it is recommended to start from the [quickstart-pytorch](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) example or the [quickstart PyTorch tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pytorch.html). -- 10 clients (instead of just 2) -- Each client holds a local dataset of 5000 training examples and 1000 test examples (note that using the `run.sh` script will only select 10 data samples by default, as the `--toy` argument is set). 
-- Server-side model evaluation after parameter aggregation -- Hyperparameter schedule using config functions -- Custom return values -- Server-side parameter initialization +This example shows how to extend your `ClientApp` and `ServerApp` capabilities compared to what's shown in the [`quickstart-pytorch`](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) example. In particular, it will show how the `ClientApp`'s state (an object of type [RecordSet](https://flower.ai/docs/framework/ref-api/flwr.common.RecordSet.html)) can be used to enable stateful clients, facilitating the design of personalized federated learning strategies, among others. The `ServerApp` in this example makes use of a custom strategy derived from the built-in [FedAvg](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedAvg.html). In addition, it will showcase how to: -## Project Setup +1. Save model checkpoints +2. Save the metrics available at the strategy (e.g. accuracies, losses) +3. Log training artefacts to [Weights & Biases](https://wandb.ai/site) +4. Implement a simple decaying learning rate schedule across rounds -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +The structure of this directory is as follows: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/advanced-pytorch . && rm -rf flower && cd advanced-pytorch +advanced-pytorch +├── pytorch_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── strategy.py # Defines a custom strategy +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -This will create a new directory called `advanced-pytorch` containing the following files: +> \[!NOTE\] +> By default this example will log metrics to Weights & Biases. For this, you need to ensure that you are logged in. Often it's as simple as executing `wandb login` on the terminal after installing `wandb`. Please refer to this [quickstart guide](https://docs.wandb.ai/quickstart#2-log-in-to-wb) for more information. -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md --- run.sh -``` +This example uses [Flower Datasets](https://flower.ai/docs/datasets/) with the [Dirichlet Partitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.DirichletPartitioner.html#flwr_datasets.partitioner.DirichletPartitioner) to partition the [Fashion-MNIST](https://huggingface.co/datasets/zalando-datasets/fashion_mnist) dataset in a non-IID fashion into 50 partitions. -### Installing Dependencies ![](_static/fmnist_50_lda.png) -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +> \[!TIP\] +> You can use Flower Datasets [built-in visualization tools](https://flower.ai/docs/datasets/tutorial-visualize-label-distribution.html) to easily generate plots like the one above.
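For reference, here is a minimal sketch of the Dirichlet partitioning described above. It uses the same `flwr_datasets` building blocks as this example's `task.py`; `alpha=1.0` and `seed=42` mirror the values used there:

```python
from flwr_datasets import FederatedDataset
from flwr_datasets.partitioner import DirichletPartitioner

# Partition Fashion-MNIST into 50 non-IID partitions by label
partitioner = DirichletPartitioner(
    num_partitions=50, partition_by="label", alpha=1.0, seed=42
)
fds = FederatedDataset(
    dataset="zalando-datasets/fashion_mnist",
    partitioners={"train": partitioner},
)
partition = fds.load_partition(0)  # training data for the first supernode
```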
-#### Poetry +### Install dependencies and project -```shell -poetry install -poetry shell +Install the dependencies defined in `pyproject.toml` as well as the `pytorch_example` package. + +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +## Run the project -```shell -poetry run python3 -c "import flwr" -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -If you don't see any errors you're good to go! +When you run the project, the strategy will create a directory structure in the form of `outputs/date/time` and store two `JSON` files: `config.json` containing the `run-config` that the `ServerApp` receives; and `results.json` containing the results (accuracies, losses) that are generated at the strategy. -#### pip +By default, the metrics {`centralized_accuracy`, `centralized_loss`, `federated_evaluate_accuracy`, `federated_evaluate_loss`} will be logged to Weights & Biases (they are also stored to the `results.json` previously mentioned). Upon executing `flwr run` you'll see a URL linking to your Weights & Biases dashboard where you can see the metrics. -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. ![](_static/wandb_plots.png) -```shell -pip install -r requirements.txt +### Run with the Simulation Engine + +With default parameters, 25% of the total 50 nodes (see `num-supernodes` in `pyproject.toml`) will be sampled for `fit` and 50% for an `evaluate` round. By default `ClientApp` objects will run on CPU. + +> \[!TIP\] +> To run your `ClientApps` on GPU or to adjust the degree of parallelism of your simulation, edit the `[tool.flwr.federations.local-sim]` section in the `pyproject.toml`. + +```bash +flwr run . + +# To disable W&B +flwr run . --run-config use-wandb=false ``` -## Run Federated Learning with PyTorch and Flower +You can run the app using another federation (see `pyproject.toml`). For example, if you have a GPU available, select the `local-sim-gpu` federation: -The included `run.sh` will start the Flower server (using `server.py`), -sleep for 2 seconds to ensure that the server is up, and then start 10 Flower clients (using `client.py`) with only a small subset of the data (in order to run on any machine), -but this can be changed by removing the `--toy` argument in the script. You can simply start everything in a terminal as follows: +```bash +flwr run . local-sim-gpu +``` -```shell -# After activating your environment -./run.sh +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "num-server-rounds=5 fraction-fit=0.5" ``` -The `run.sh` script starts processes in the background so that you don't have to open eleven terminal windows. If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`).
If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). +### Run with the Deployment Engine -You can also manually run `python3 server.py` and `python3 client.py --client-id ` for as many clients as you want but you have to make sure that each command is run in a different terminal window (or a different computer on the network). In addition, you can make your clients use either `EfficienNet` (default) or `AlexNet` (but all clients in the experiment should use the same). Switch between models using the `--model` flag when launching `client.py` and `server.py`. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/advanced-pytorch/_static/fmnist_50_lda.png b/examples/advanced-pytorch/_static/fmnist_50_lda.png new file mode 100644 index 000000000000..9dfedc59a3de Binary files /dev/null and b/examples/advanced-pytorch/_static/fmnist_50_lda.png differ diff --git a/examples/advanced-pytorch/_static/wandb_plots.png b/examples/advanced-pytorch/_static/wandb_plots.png new file mode 100644 index 000000000000..f0f44ca5be19 Binary files /dev/null and b/examples/advanced-pytorch/_static/wandb_plots.png differ diff --git a/examples/advanced-pytorch/client.py b/examples/advanced-pytorch/client.py deleted file mode 100644 index 1b93d45d950e..000000000000 --- a/examples/advanced-pytorch/client.py +++ /dev/null @@ -1,160 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import datasets -import flwr as fl -import torch -from torch.utils.data import DataLoader - -import utils - -warnings.filterwarnings("ignore") - - -class CifarClient(fl.client.NumPyClient): - def __init__( - self, - trainset: datasets.Dataset, - testset: datasets.Dataset, - device: torch.device, - model_str: str, - validation_split: int = 0.1, - ): - self.device = device - self.trainset = trainset - self.testset = testset - self.validation_split = validation_split - if model_str == "alexnet": - self.model = utils.load_alexnet(classes=10) - else: - self.model = utils.load_efficientnet(classes=10) - - def set_parameters(self, parameters): - """Loads a alexnet or efficientnet model and replaces it parameters with the - ones given.""" - - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - """Train parameters on the locally held training set.""" - - # Update local model parameters - self.set_parameters(parameters) - - # Get hyperparameters for this round - batch_size: int = config["batch_size"] - epochs: int = config["local_epochs"] - - train_valid = self.trainset.train_test_split(self.validation_split, seed=42) - trainset = train_valid["train"] - valset = train_valid["test"] - - train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True) - val_loader = DataLoader(valset, batch_size=batch_size) - - results = utils.train(self.model, train_loader, val_loader, epochs, self.device) - - parameters_prime = utils.get_model_params(self.model) - num_examples_train = len(trainset) - - return parameters_prime, num_examples_train, results - - def evaluate(self, parameters, config): - """Evaluate parameters on the locally held test set.""" 
- # Update local model parameters - self.set_parameters(parameters) - - # Get config values - steps: int = config["val_steps"] - - # Evaluate global model parameters on the local test data and return results - testloader = DataLoader(self.testset, batch_size=16) - - loss, accuracy = utils.test(self.model, testloader, steps, self.device) - return float(loss), len(self.testset), {"accuracy": float(accuracy)} - - -def client_dry_run(device: torch.device = "cpu"): - """Weak tests to check whether all client methods are working as expected.""" - - model = utils.load_efficientnet(classes=10) - trainset, testset = utils.load_partition(0) - trainset = trainset.select(range(10)) - testset = testset.select(range(10)) - client = CifarClient(trainset, testset, device) - client.fit( - utils.get_model_params(model), - {"batch_size": 16, "local_epochs": 1}, - ) - - client.evaluate(utils.get_model_params(model), {"val_steps": 32}) - - print("Dry Run Successful") - - -def main() -> None: - # Parse command line argument `partition` - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--dry", - type=bool, - default=False, - required=False, - help="Do a dry-run to check the client", - ) - parser.add_argument( - "--client-id", - type=int, - default=0, - choices=range(0, 10), - required=False, - help="Specifies the artificial data partition of CIFAR10 to be used. \ - Picks partition 0 by default", - ) - parser.add_argument( - "--toy", - action="store_true", - help="Set to true to quicky run the client using only 10 datasamples. \ - Useful for testing purposes. Default: False", - ) - parser.add_argument( - "--use_cuda", - type=bool, - default=False, - required=False, - help="Set to true to use GPU. Default: False", - ) - parser.add_argument( - "--model", - type=str, - default="efficientnet", - choices=["efficientnet", "alexnet"], - help="Use either Efficientnet or Alexnet models. 
\ - If you want to achieve differential privacy, please use the Alexnet model", - ) - - args = parser.parse_args() - - device = torch.device( - "cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu" - ) - - if args.dry: - client_dry_run(device) - else: - # Load a subset of CIFAR-10 to simulate the local data partition - trainset, testset = utils.load_partition(args.client_id) - - if args.toy: - trainset = trainset.select(range(10)) - testset = testset.select(range(10)) - # Start Flower client - client = CifarClient(trainset, testset, device, args.model).to_client() - fl.client.start_client(server_address="127.0.0.1:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-pytorch/pyproject.toml b/examples/advanced-pytorch/pyproject.toml index b846a6054cc8..553abeecb6ad 100644 --- a/examples/advanced-pytorch/pyproject.toml +++ b/examples/advanced-pytorch/pyproject.toml @@ -1,20 +1,46 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "advanced-pytorch" -version = "0.1.0" -description = "Advanced Flower/PyTorch Example" -authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", +[project] +name = "pytorch-example" +version = "1.0.0" +description = "Federated Learning with PyTorch and Flower (Advanced Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.11.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", + "wandb==0.17.8", ] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "1.13.1" -torchvision = "0.14.1" -validators = "0.18.2" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pytorch_example.server_app:app" +clientapp = "pytorch_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 10 +fraction-fit = 0.25 +fraction-evaluate = 0.5 +local-epochs = 1 +server-device = "cpu" +use-wandb = true + +[tool.flwr.federations] +default = "local-sim" + +[tool.flwr.federations.local-sim] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 # each ClientApp assumes to use 2CPUs +options.backend.client-resources.num-gpus = 0.0 # ratio of VRAM a ClientApp has access to +[tool.flwr.federations.local-sim-gpu] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 +options.backend.client-resources.num-gpus = 0.25 diff --git a/examples/advanced-pytorch/pytorch_example/__init__.py b/examples/advanced-pytorch/pytorch_example/__init__.py new file mode 100644 index 000000000000..d93e8cdb922d --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/__init__.py @@ -0,0 +1 @@ +"""pytorch-example: A Flower / PyTorch app.""" diff --git a/examples/advanced-pytorch/pytorch_example/client_app.py b/examples/advanced-pytorch/pytorch_example/client_app.py new file mode 100644 index 000000000000..72a9c8323686 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/client_app.py @@ -0,0 +1,122 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import torch +from pytorch_example.task import Net, get_weights, load_data, set_weights, test, train + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context, ParametersRecord, RecordSet, array_from_numpy + + +# Define Flower Client and client_fn +class 
FlowerClient(NumPyClient): + """A simple client that showcases how to use the state. + + It implements a basic version of `personalization` by which + the classification layer of the CNN is stored locally and used + and updated during `fit()` and used during `evaluate()`. + """ + + def __init__( + self, net, client_state: RecordSet, trainloader, valloader, local_epochs + ): + self.net: Net = net + self.client_state = client_state + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + self.local_layer_name = "classification-head" + + def fit(self, parameters, config): + """Train model locally. + + The client stores in its context the parameters of the last layer in the model + (i.e. the classification head). The classifier is saved at the end of the + training and used the next time this client participates. + """ + + # Apply weights from global models (the whole model is replaced) + set_weights(self.net, parameters) + + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state() + + train_loss = train( + self.net, + self.trainloader, + self.local_epochs, + lr=float(config["lr"]), + device=self.device, + ) + # Save classification head to context's state to use in a future fit() call + self._save_layer_weights_to_state() + + # Return locally-trained model and metrics + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) + + def _save_layer_weights_to_state(self): + """Save last layer weights to state.""" + state_dict_arrays = {} + for k, v in self.net.fc2.state_dict().items(): + state_dict_arrays[k] = array_from_numpy(v.cpu().numpy()) + + # Add to recordset (replace if already exists) + self.client_state.parameters_records[self.local_layer_name] = ParametersRecord( + state_dict_arrays + ) + + def _load_layer_weights_from_state(self): + """Load last layer weights to state.""" + if self.local_layer_name not in self.client_state.parameters_records: + return + + state_dict = {} + param_records = self.client_state.parameters_records + for k, v in param_records[self.local_layer_name].items(): + state_dict[k] = torch.from_numpy(v.numpy()) + + # apply previously saved classification head by this client + self.net.fc2.load_state_dict(state_dict, strict=True) + + def evaluate(self, parameters, config): + """Evaluate the global model on the local validation set. + + Note the classification head is replaced with the weights this client had the + last time it trained the model. + """ + set_weights(self.net, parameters) + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state() + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + # Load model and data + net = Net() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + # We pass the state to persist information across + # participation rounds. 
Note that each client always + # receives the same Context instance (it's a 1:1 mapping) + client_state = context.state + return FlowerClient( + net, client_state, trainloader, valloader, local_epochs + ).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/advanced-pytorch/pytorch_example/server_app.py b/examples/advanced-pytorch/pytorch_example/server_app.py new file mode 100644 index 000000000000..3fa2ae26dc7f --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/server_app.py @@ -0,0 +1,96 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import torch +from pytorch_example.strategy import CustomFedAvg +from pytorch_example.task import ( + Net, + apply_eval_transforms, + get_weights, + set_weights, + test, +) +from torch.utils.data import DataLoader + +from datasets import load_dataset +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig + + +def gen_evaluate_fn( + testloader: DataLoader, + device: torch.device, +): + """Generate the function for centralized evaluation.""" + + def evaluate(server_round, parameters_ndarrays, config): + """Evaluate global model on centralized test set.""" + net = Net() + set_weights(net, parameters_ndarrays) + net.to(device) + loss, accuracy = test(net, testloader, device=device) + return loss, {"centralized_accuracy": accuracy} + + return evaluate + + +def on_fit_config(server_round: int): + """Construct `config` that clients receive when running `fit()`""" + lr = 0.1 + # Enable a simple form of learning rate decay + if server_round > 10: + lr /= 2 + return {"lr": lr} + + +# Define metric aggregation function +def weighted_average(metrics): + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"federated_evaluate_accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + fraction_eval = context.run_config["fraction-evaluate"] + server_device = context.run_config["server-device"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Prepare dataset for central evaluation + + # This is the exact same dataset as the one donwloaded by the clients via + # FlowerDatasets. However, we don't use FlowerDatasets for the server since + # partitioning is not needed. 
+ # We make use of the "test" split only + global_test_set = load_dataset("zalando-datasets/fashion_mnist")["test"] + + testloader = DataLoader( + global_test_set.with_transform(apply_eval_transforms), + batch_size=32, + ) + + # Define strategy + strategy = CustomFedAvg( + run_config=context.run_config, + use_wandb=context.run_config["use-wandb"], + fraction_fit=fraction_fit, + fraction_evaluate=fraction_eval, + initial_parameters=parameters, + on_fit_config_fn=on_fit_config, + evaluate_fn=gen_evaluate_fn(testloader, device=server_device), + evaluate_metrics_aggregation_fn=weighted_average, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/advanced-pytorch/pytorch_example/strategy.py b/examples/advanced-pytorch/pytorch_example/strategy.py new file mode 100644 index 000000000000..97fc0010f143 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/strategy.py @@ -0,0 +1,116 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import json +from logging import INFO + +import torch +import wandb +from pytorch_example.task import Net, create_run_dir, set_weights + +from flwr.common import logger, parameters_to_ndarrays +from flwr.common.typing import UserConfig +from flwr.server.strategy import FedAvg + +PROJECT_NAME = "FLOWER-advanced-pytorch" + + +class CustomFedAvg(FedAvg): + """A class that behaves like FedAvg but has extra functionality. + + This strategy: (1) saves results to the filesystem, (2) saves a + checkpoint of the global model when a new best is found, (3) logs + results to W&B if enabled. + """ + + def __init__(self, run_config: UserConfig, use_wandb: bool, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Create a directory where to save results from this run + self.save_path, self.run_dir = create_run_dir(run_config) + self.use_wandb = use_wandb + # Initialise W&B if set + if use_wandb: + self._init_wandb_project() + + # Keep track of best acc + self.best_acc_so_far = 0.0 + + # A dictionary to store results as they come + self.results = {} + + def _init_wandb_project(self): + # init W&B + wandb.init(project=PROJECT_NAME, name=f"{str(self.run_dir)}-ServerApp") + + def _store_results(self, tag: str, results_dict): + """Store results in dictionary, then save as JSON.""" + # Update results dict + if tag in self.results: + self.results[tag].append(results_dict) + else: + self.results[tag] = [results_dict] + + # Save results to disk. + # Note we overwrite the same file with each call to this function. + # While this works, a more sophisticated approach is preferred + # in situations where the contents to be saved are larger. + with open(f"{self.save_path}/results.json", "w", encoding="utf-8") as fp: + json.dump(self.results, fp) + + def _update_best_acc(self, round, accuracy, parameters): + """Determines if a new best global model has been found. + + If so, the model checkpoint is saved to disk. + """ + if accuracy > self.best_acc_so_far: + self.best_acc_so_far = accuracy + logger.log(INFO, "💡 New best global model found: %f", accuracy) + # You could save the parameters object directly. + # Instead we are going to apply them to a PyTorch + # model and save the state dict. 
+ # Converts flwr.common.Parameters to ndarrays + ndarrays = parameters_to_ndarrays(parameters) + model = Net() + set_weights(model, ndarrays) + # Save the PyTorch model + file_name = f"model_state_acc_{accuracy}_round_{round}.pth" + torch.save(model.state_dict(), self.save_path / file_name) + + def store_results_and_log(self, server_round: int, tag: str, results_dict): + """A helper method that stores results and logs them to W&B if enabled.""" + # Store results + self._store_results( + tag=tag, + results_dict={"round": server_round, **results_dict}, + ) + + if self.use_wandb: + # Log centralized loss and metrics to W&B + wandb.log(results_dict, step=server_round) + + def evaluate(self, server_round, parameters): + """Run centralized evaluation if callback was passed to strategy init.""" + loss, metrics = super().evaluate(server_round, parameters) + + # Save model if new best central accuracy is found + self._update_best_acc(server_round, metrics["centralized_accuracy"], parameters) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="centralized_evaluate", + results_dict={"centralized_loss": loss, **metrics}, + ) + return loss, metrics + + def aggregate_evaluate(self, server_round, results, failures): + """Aggregate results from federated evaluation.""" + loss, metrics = super().aggregate_evaluate(server_round, results, failures) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="federated_evaluate", + results_dict={"federated_evaluate_loss": loss, **metrics}, + ) + return loss, metrics diff --git a/examples/advanced-pytorch/pytorch_example/task.py b/examples/advanced-pytorch/pytorch_example/task.py new file mode 100644 index 000000000000..0224e8236408 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/task.py @@ -0,0 +1,159 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import json +from collections import OrderedDict +from datetime import datetime +from pathlib import Path + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import DirichletPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import ( + Compose, + Normalize, + RandomCrop, + RandomHorizontalFlip, + ToTensor, +) + +from flwr.common.typing import UserConfig + +FM_NORMALIZATION = ((0.1307,), (0.3081,)) +EVAL_TRANSFORMS = Compose([ToTensor(), Normalize(*FM_NORMALIZATION)]) +TRAIN_TRANSFORMS = Compose( + [ + RandomCrop(28, padding=4), + RandomHorizontalFlip(), + ToTensor(), + Normalize(*FM_NORMALIZATION), + ] +) + + +class Net(nn.Module): + """Model (simple CNN adapted for Fashion-MNIST)""" + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 16, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(16, 32, 5) + self.fc1 = nn.Linear(32 * 4 * 4, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 32 * 4 * 4) + x = F.relu(self.fc1(x)) + return self.fc2(x) + + +def train(net, trainloader, epochs, lr, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["image"] + labels = batch["label"] + optimizer.zero_grad() + loss 
= criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + +def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["image"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def apply_train_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [TRAIN_TRANSFORMS(img) for img in batch["image"]] + return batch + + +def apply_eval_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [EVAL_TRANSFORMS(img) for img in batch["image"]] + return batch + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + """Load partition FashionMNIST data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = DirichletPartitioner( + num_partitions=num_partitions, + partition_by="label", + alpha=1.0, + seed=42, + ) + fds = FederatedDataset( + dataset="zalando-datasets/fashion_mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + train_partition = partition_train_test["train"].with_transform( + apply_train_transforms + ) + test_partition = partition_train_test["test"].with_transform(apply_eval_transforms) + trainloader = DataLoader(train_partition, batch_size=32, shuffle=True) + testloader = DataLoader(test_partition, batch_size=32) + return trainloader, testloader + + +def create_run_dir(config: UserConfig) -> Path: + """Create a directory where to save results from this run.""" + # Create output directory given current timestamp + current_time = datetime.now() + run_dir = current_time.strftime("%Y-%m-%d/%H-%M-%S") + # Save path is based on the current directory + save_path = Path.cwd() / f"outputs/{run_dir}" + save_path.mkdir(parents=True, exist_ok=False) + + # Save run config as json + with open(f"{save_path}/run_config.json", "w", encoding="utf-8") as fp: + json.dump(config, fp) + + return save_path, run_dir diff --git a/examples/advanced-pytorch/requirements.txt b/examples/advanced-pytorch/requirements.txt deleted file mode 100644 index f4d6a0774162..000000000000 --- a/examples/advanced-pytorch/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -torch==1.13.1 -torchvision==0.14.1 -validators==0.18.2 diff --git a/examples/advanced-pytorch/run.sh b/examples/advanced-pytorch/run.sh deleted file mode 100755 index c3d52491b987..000000000000 --- a/examples/advanced-pytorch/run.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname 
"${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -python server.py --toy & -sleep 10 # Sleep for 10s to give the server enough time to start and dowload the dataset - -for i in `seq 0 9`; do - echo "Starting client $i" - python client.py --client-id=${i} --toy & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/advanced-pytorch/server.py b/examples/advanced-pytorch/server.py deleted file mode 100644 index 6b69512fb3b7..000000000000 --- a/examples/advanced-pytorch/server.py +++ /dev/null @@ -1,121 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict -from typing import Dict, Optional, Tuple - -import flwr as fl -import torch -from flwr_datasets import FederatedDataset -from torch.utils.data import DataLoader - -import utils - -warnings.filterwarnings("ignore") - - -def fit_config(server_round: int): - """Return training configuration dict for each round. - - Keep batch size fixed at 32, perform two rounds of training with one local epoch, - increase to two local epochs afterwards. - """ - config = { - "batch_size": 16, - "local_epochs": 1 if server_round < 2 else 2, - } - return config - - -def evaluate_config(server_round: int): - """Return evaluation configuration dict for each round. - - Perform five local evaluation steps on each client (i.e., use five batches) during - rounds one to three, then increase to ten local evaluation steps. - """ - val_steps = 5 if server_round < 4 else 10 - return {"val_steps": val_steps} - - -def get_evaluate_fn(model: torch.nn.Module, toy: bool): - """Return an evaluation function for server-side evaluation.""" - - # Load data here to avoid the overhead of doing it in `evaluate` itself - centralized_data = utils.load_centralized_data() - if toy: - # use only 10 samples as validation set - centralized_data = centralized_data.select(range(10)) - - val_loader = DataLoader(centralized_data, batch_size=16) - - # The `evaluate` function will be called after every round - def evaluate( - server_round: int, - parameters: fl.common.NDArrays, - config: Dict[str, fl.common.Scalar], - ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: - # Update model with the latest parameters - params_dict = zip(model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - - loss, accuracy = utils.test(model, val_loader) - return loss, {"accuracy": accuracy} - - return evaluate - - -def main(): - """Load model for - 1. server-side parameter initialization - 2. server-side parameter evaluation - """ - - # Parse command line argument `partition` - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--toy", - action="store_true", - help="Set to true to use only 10 datasamples for validation. \ - Useful for testing purposes. Default: False", - ) - parser.add_argument( - "--model", - type=str, - default="efficientnet", - choices=["efficientnet", "alexnet"], - help="Use either Efficientnet or Alexnet models. 
\ - If you want to achieve differential privacy, please use the Alexnet model", - ) - - args = parser.parse_args() - - if args.model == "alexnet": - model = utils.load_alexnet(classes=10) - else: - model = utils.load_efficientnet(classes=10) - - model_parameters = [val.cpu().numpy() for _, val in model.state_dict().items()] - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - min_fit_clients=2, - min_evaluate_clients=2, - min_available_clients=10, - evaluate_fn=get_evaluate_fn(model, args.toy), - on_fit_config_fn=fit_config, - on_evaluate_config_fn=evaluate_config, - initial_parameters=fl.common.ndarrays_to_parameters(model_parameters), - ) - - # Start Flower server for four rounds of federated learning - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=4), - strategy=strategy, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-pytorch/utils.py b/examples/advanced-pytorch/utils.py deleted file mode 100644 index d2b3955c9fde..000000000000 --- a/examples/advanced-pytorch/utils.py +++ /dev/null @@ -1,117 +0,0 @@ -import warnings - -import torch -from flwr_datasets import FederatedDataset -from torchvision.models import AlexNet, efficientnet_b0 -from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor - -warnings.filterwarnings("ignore") - - -def load_partition(partition_id, toy: bool = False): - """Load partition CIFAR10 data.""" - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - partition_train_test = partition_train_test.with_transform(apply_transforms) - return partition_train_test["train"], partition_train_test["test"] - - -def load_centralized_data(): - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - centralized_data = fds.load_split("test") - centralized_data = centralized_data.with_transform(apply_transforms) - return centralized_data - - -def apply_transforms(batch): - """Apply transforms to the partition from FederatedDataset.""" - pytorch_transforms = Compose( - [ - Resize(256), - CenterCrop(224), - ToTensor(), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - -def train( - net, trainloader, valloader, epochs, device: torch.device = torch.device("cpu") -): - """Train the network on the training set.""" - print("Starting training...") - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD( - net.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4 - ) - net.train() - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["img"], batch["label"] - images, labels = images.to(device), labels.to(device) - optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optimizer.step() - - net.to("cpu") # move model back to CPU - - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) - - results = { - "train_loss": train_loss, - "train_accuracy": train_acc, - "val_loss": val_loss, - "val_accuracy": val_acc, - } - return results - - -def test( - net, testloader, steps: int = None, device: torch.device = torch.device("cpu") -): - """Validate the network 
on the entire test set.""" - print("Starting evalutation...") - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for batch_idx, batch in enumerate(testloader): - images, labels = batch["img"], batch["label"] - images, labels = images.to(device), labels.to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - if steps is not None and batch_idx == steps: - break - accuracy = correct / len(testloader.dataset) - net.to("cpu") # move model back to CPU - return loss, accuracy - - -def load_efficientnet(classes: int = 10): - """Loads EfficienNetB0 from TorchVision.""" - efficientnet = efficientnet_b0(pretrained=True) - # Re-init output linear layer with the right number of classes - model_classes = efficientnet.classifier[1].in_features - if classes != model_classes: - efficientnet.classifier[1] = torch.nn.Linear(model_classes, classes) - return efficientnet - - -def get_model_params(model): - """Returns a model's parameters.""" - return [val.cpu().numpy() for _, val in model.state_dict().items()] - - -def load_alexnet(classes): - """Load AlexNet model from TorchVision.""" - return AlexNet(num_classes=classes) diff --git a/examples/advanced-tensorflow/pyproject.toml b/examples/advanced-tensorflow/pyproject.toml index 02bd923129a4..9fc623a0f3ec 100644 --- a/examples/advanced-tensorflow/pyproject.toml +++ b/examples/advanced-tensorflow/pyproject.toml @@ -9,7 +9,7 @@ description = "Advanced Flower/TensorFlow Example" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } diff --git a/examples/android-kotlin/gen_tflite/pyproject.toml b/examples/android-kotlin/gen_tflite/pyproject.toml index aabf351bd51d..884e7148cc3d 100644 --- a/examples/android-kotlin/gen_tflite/pyproject.toml +++ b/examples/android-kotlin/gen_tflite/pyproject.toml @@ -5,7 +5,7 @@ description = "" authors = ["Steven Hé (Sīchàng) "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" numpy = ">=1.23,<2.0" tensorflow-cpu = ">=2.12,<3.0" pandas = ">=2.0,<3.0" diff --git a/examples/android-kotlin/pyproject.toml b/examples/android-kotlin/pyproject.toml index 9cf0688d83b5..b83b243a349d 100644 --- a/examples/android-kotlin/pyproject.toml +++ b/examples/android-kotlin/pyproject.toml @@ -9,5 +9,5 @@ description = "" authors = ["Steven Hé (Sīchàng) "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" diff --git a/examples/android/pyproject.toml b/examples/android/pyproject.toml index 0371f7208292..d0d18ebc48bc 100644 --- a/examples/android/pyproject.toml +++ b/examples/android/pyproject.toml @@ -9,7 +9,7 @@ description = "Android Example" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } diff --git a/examples/app-pytorch/pyproject.toml b/examples/app-pytorch/pyproject.toml index 
c00e38aef19b..88e916546632 100644 --- a/examples/app-pytorch/pyproject.toml +++ b/examples/app-pytorch/pyproject.toml @@ -9,7 +9,7 @@ description = "Multi-Tenant Federated Learning with Flower and PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" # Mandatory dependencies flwr = { version = "^1.8.0", extras = ["simulation"] } torch = "2.2.1" diff --git a/examples/custom-mods/pyproject.toml b/examples/custom-mods/pyproject.toml index e690e05bab8f..ff36398ef157 100644 --- a/examples/custom-mods/pyproject.toml +++ b/examples/custom-mods/pyproject.toml @@ -9,7 +9,7 @@ description = "Multi-Tenant Federated Learning with Flower and PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { path = "../../", develop = true, extras = ["simulation"] } tensorboard = "2.16.2" torch = "1.13.1" diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py index 2c2dd2742633..04185caad0f4 100644 --- a/examples/doc/source/conf.py +++ b/examples/doc/source/conf.py @@ -29,7 +29,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.11.0" +release = "1.12.0" # -- General configuration --------------------------------------------------- @@ -66,6 +66,10 @@ "quickstart-mxnet": "index.html", "mxnet-from-centralized-to-federated": "index.html", "app-secure-aggregation": "flower-secure-aggregation.html", + "llm-flowertune": "flowertune-llm.html", + "vit-finetune": "flowertune-vit.html", + "simulation-pytorch": "quickstart-pytorch.html", + "simulation-tensorflow": "quickstart-tensorflow.html", } diff --git a/examples/federated-kaplan-meier-fitter/README.md b/examples/federated-kaplan-meier-fitter/README.md index 1964ec4e5653..cc68a331bbba 100644 --- a/examples/federated-kaplan-meier-fitter/README.md +++ b/examples/federated-kaplan-meier-fitter/README.md @@ -69,7 +69,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` You can also check that the results match the centralized version. diff --git a/examples/federated-kaplan-meier-fitter/pyproject.toml b/examples/federated-kaplan-meier-fitter/pyproject.toml index 47cb0a4ba286..159ccc15efe4 100644 --- a/examples/federated-kaplan-meier-fitter/pyproject.toml +++ b/examples/federated-kaplan-meier-fitter/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Kaplan Meier Fitter with Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets>=0.3.0", "numpy>=1.23.2", "pandas>=2.0.0", diff --git a/examples/fl-dp-sa/README.md b/examples/fl-dp-sa/README.md index 65c8a5b18fa8..61a6c80f3556 100644 --- a/examples/fl-dp-sa/README.md +++ b/examples/fl-dp-sa/README.md @@ -1,28 +1,63 @@ --- -tags: [basic, vision, fds] +tags: [DP, SecAgg, vision, fds] dataset: [MNIST] framework: [torch, torchvision] --- -# Example of Flower App with DP and SA +# Flower Example on MNIST with Differential Privacy and Secure Aggregation -This is a simple example that utilizes central differential privacy with client-side fixed clipping and secure aggregation. -Note: This example is designed for a small number of rounds and is intended for demonstration purposes. 
+This example demonstrates a federated learning setup using Flower, incorporating central differential privacy (DP) with client-side fixed clipping and secure aggregation (SA). It is intended for a small number of rounds for demonstration purposes. -## Install dependencies +This example is similar to the [quickstart-pytorch example](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) and extends it by integrating central differential privacy and secure aggregation. For more details on differential privacy and secure aggregation in Flower, please refer to the documentation [here](https://flower.ai/docs/framework/how-to-use-differential-privacy.html) and [here](https://flower.ai/docs/framework/contributor-ref-secure-aggregation-protocols.html). -```bash -# Using pip -pip install . +## Set up the project + +### Clone the project + +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/fl-dp-sa . && rm -rf flower && cd fl-dp-sa +``` + +This will create a new directory called `fl-dp-sa` containing the following files: -# Or using Poetry -poetry install +```shell +fl-dp-sa +├── fl_dp_sa +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -## Run +### Install dependencies and project -The example uses the MNIST dataset with a total of 100 clients, with 20 clients sampled in each round. The hyperparameters for DP and SecAgg are specified in `server.py`. +Install the dependencies defined in `pyproject.toml` as well as the `fl_dp_sa` package. ```shell -flower-simulation --server-app fl_dp_sa.server:app --client-app fl_dp_sa.client:app --num-supernodes 100 +# From a new Python environment, run: +pip install -e . +``` + +## Run the project + +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. + +### Run with the Simulation Engine + +```bash +flwr run . +``` + +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "noise-multiplier=0.1 clipping-norm=5" ``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker.
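As a rough sketch of how these DP settings are applied (this example's `server_app.py` reads them from the run config and does the equivalent), the base strategy is wrapped with client-side fixed clipping; the values below are the defaults from this example's `pyproject.toml`:

```python
from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping, FedAvg

# Base strategy; fraction_fit is chosen to match the DP
# hyperparameter `num-sampled-clients` (20 out of 100 supernodes)
strategy = FedAvg(fraction_fit=0.2, fraction_evaluate=0.0, min_fit_clients=20)

# Wrap it so clients clip their updates locally and noise is added centrally
dp_strategy = DifferentialPrivacyClientSideFixedClipping(
    strategy,
    noise_multiplier=0.2,
    clipping_norm=10,
    num_sampled_clients=20,
)
```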
diff --git a/examples/fl-dp-sa/fl_dp_sa/__init__.py b/examples/fl-dp-sa/fl_dp_sa/__init__.py index 741260348ab8..c5c9a7e9581c 100644 --- a/examples/fl-dp-sa/fl_dp_sa/__init__.py +++ b/examples/fl-dp-sa/fl_dp_sa/__init__.py @@ -1 +1 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" diff --git a/examples/fl-dp-sa/fl_dp_sa/client.py b/examples/fl-dp-sa/fl_dp_sa/client.py deleted file mode 100644 index b3b02c6e9d61..000000000000 --- a/examples/fl-dp-sa/fl_dp_sa/client.py +++ /dev/null @@ -1,42 +0,0 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" - -from flwr.client import ClientApp, NumPyClient -from flwr.client.mod import fixedclipping_mod, secaggplus_mod - -from fl_dp_sa.task import DEVICE, Net, get_weights, load_data, set_weights, test, train - -# Load model and data (simple CNN, CIFAR-10) -net = Net().to(DEVICE) - - -# Define FlowerClient and client_fn -class FlowerClient(NumPyClient): - def __init__(self, trainloader, testloader) -> None: - self.trainloader = trainloader - self.testloader = testloader - - def fit(self, parameters, config): - set_weights(net, parameters) - results = train(net, self.trainloader, self.testloader, epochs=1, device=DEVICE) - return get_weights(net), len(self.trainloader.dataset), results - - def evaluate(self, parameters, config): - set_weights(net, parameters) - loss, accuracy = test(net, self.testloader) - return loss, len(self.testloader.dataset), {"accuracy": accuracy} - - -def client_fn(cid: str): - """Create and return an instance of Flower `Client`.""" - trainloader, testloader = load_data(partition_id=int(cid)) - return FlowerClient(trainloader, testloader).to_client() - - -# Flower ClientApp -app = ClientApp( - client_fn=client_fn, - mods=[ - secaggplus_mod, - fixedclipping_mod, - ], -) diff --git a/examples/fl-dp-sa/fl_dp_sa/client_app.py b/examples/fl-dp-sa/fl_dp_sa/client_app.py new file mode 100644 index 000000000000..5630d4f4d14f --- /dev/null +++ b/examples/fl-dp-sa/fl_dp_sa/client_app.py @@ -0,0 +1,50 @@ +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.client.mod import fixedclipping_mod, secaggplus_mod + +from fl_dp_sa.task import Net, get_weights, load_data, set_weights, test, train + + +class FlowerClient(NumPyClient): + def __init__(self, trainloader, testloader) -> None: + self.net = Net() + self.trainloader = trainloader + self.testloader = testloader + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.testloader, + epochs=1, + device=self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return loss, len(self.testloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + trainloader, testloader = load_data( + partition_id=partition_id, num_partitions=context.node_config["num-partitions"] + ) + return FlowerClient(trainloader, testloader).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, + mods=[ + secaggplus_mod, + fixedclipping_mod, + ], +) diff --git a/examples/fl-dp-sa/fl_dp_sa/server.py 
b/examples/fl-dp-sa/fl_dp_sa/server_app.py similarity index 56% rename from examples/fl-dp-sa/fl_dp_sa/server.py rename to examples/fl-dp-sa/fl_dp_sa/server_app.py index 3ec0ba757b0d..1704b4942ff8 100644 --- a/examples/fl-dp-sa/fl_dp_sa/server.py +++ b/examples/fl-dp-sa/fl_dp_sa/server_app.py @@ -1,20 +1,22 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" from typing import List, Tuple from flwr.common import Context, Metrics, ndarrays_to_parameters -from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig +from flwr.server import ( + Driver, + LegacyContext, + ServerApp, + ServerConfig, +) from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping, FedAvg from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow from fl_dp_sa.task import Net, get_weights -# Define metric aggregation function def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: examples = [num_examples for num_examples, _ in metrics] - - # Multiply accuracy of each client by number of examples used train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] train_accuracies = [ num_examples * m["train_accuracy"] for num_examples, m in metrics @@ -22,7 +24,6 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] - # Aggregate and return custom metric (weighted average) return { "train_loss": sum(train_losses) / sum(examples), "train_accuracy": sum(train_accuracies) / sum(examples), @@ -31,30 +32,36 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: } -# Initialize model parameters -ndarrays = get_weights(Net()) -parameters = ndarrays_to_parameters(ndarrays) +app = ServerApp() -# Define strategy -strategy = FedAvg( - fraction_fit=0.2, - fraction_evaluate=0.0, # Disable evaluation for demo purpose - min_fit_clients=20, - min_available_clients=20, - fit_metrics_aggregation_fn=weighted_average, - initial_parameters=parameters, -) -strategy = DifferentialPrivacyClientSideFixedClipping( - strategy, noise_multiplier=0.2, clipping_norm=10, num_sampled_clients=20 -) +@app.main() +def main(driver: Driver, context: Context) -> None: + # Initialize global model + model_weights = get_weights(Net()) + parameters = ndarrays_to_parameters(model_weights) + + # Note: The fraction_fit value is configured based on the DP hyperparameter `num-sampled-clients`. 
+ strategy = FedAvg( + fraction_fit=0.2, + fraction_evaluate=0.0, + min_fit_clients=20, + fit_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) -app = ServerApp() + noise_multiplier = context.run_config["noise-multiplier"] + clipping_norm = context.run_config["clipping-norm"] + num_sampled_clients = context.run_config["num-sampled-clients"] + strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, + noise_multiplier=noise_multiplier, + clipping_norm=clipping_norm, + num_sampled_clients=num_sampled_clients, + ) -@app.main() -def main(driver: Driver, context: Context) -> None: # Construct the LegacyContext context = LegacyContext( context=context, @@ -65,8 +72,8 @@ def main(driver: Driver, context: Context) -> None: # Create the train/evaluate workflow workflow = DefaultWorkflow( fit_workflow=SecAggPlusWorkflow( - num_shares=7, - reconstruction_threshold=4, + num_shares=context.run_config["num-shares"], + reconstruction_threshold=context.run_config["reconstruction-threshold"], ) ) diff --git a/examples/fl-dp-sa/fl_dp_sa/task.py b/examples/fl-dp-sa/fl_dp_sa/task.py index 5b4fd7dee592..c145cebe1378 100644 --- a/examples/fl-dp-sa/fl_dp_sa/task.py +++ b/examples/fl-dp-sa/fl_dp_sa/task.py @@ -1,24 +1,22 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" from collections import OrderedDict -from logging import INFO import torch import torch.nn as nn import torch.nn.functional as F -from flwr.common.logger import log from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch.utils.data import DataLoader from torchvision.transforms import Compose, Normalize, ToTensor -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") +fds = None # Cache FederatedDataset -class Net(nn.Module): - """Model.""" +class Net(nn.Module): def __init__(self) -> None: - super(Net, self).__init__() + super().__init__() self.conv1 = nn.Conv2d(1, 6, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) @@ -36,9 +34,16 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.fc3(x) -def load_data(partition_id): +def load_data(partition_id: int, num_partitions: int): """Load partition MNIST data.""" - fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2, seed=42) @@ -70,8 +75,8 @@ def train(net, trainloader, valloader, epochs, device): loss.backward() optimizer.step() - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) + train_loss, train_acc = test(net, trainloader, device) + val_loss, val_acc = test(net, valloader, device) results = { "train_loss": train_loss, @@ -82,17 +87,17 @@ def train(net, trainloader, valloader, epochs, device): return results -def test(net, testloader): +def test(net, testloader, device): """Validate the model on the test set.""" - net.to(DEVICE) + net.to(device) criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): for batch in testloader: - images = batch["image"].to(DEVICE) - labels = batch["label"].to(DEVICE) - outputs = net(images.to(DEVICE)) - labels = 
labels.to(DEVICE) + images = batch["image"].to(device) + labels = batch["label"].to(device) + outputs = net(images.to(device)) + labels = labels.to(device) loss += criterion(outputs, labels).item() correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() accuracy = correct / len(testloader.dataset) diff --git a/examples/fl-dp-sa/flower.toml b/examples/fl-dp-sa/flower.toml deleted file mode 100644 index ea2e98206791..000000000000 --- a/examples/fl-dp-sa/flower.toml +++ /dev/null @@ -1,13 +0,0 @@ -[project] -name = "fl_dp_sa" -version = "1.0.0" -description = "" -license = "Apache-2.0" -authors = [ - "The Flower Authors ", -] -readme = "README.md" - -[flower.components] -serverapp = "fl_dp_sa.server:app" -clientapp = "fl_dp_sa.client:app" diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml index 1ca343b072d9..fbb463cc1c05 100644 --- a/examples/fl-dp-sa/pyproject.toml +++ b/examples/fl-dp-sa/pyproject.toml @@ -1,21 +1,40 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "fl-dp-sa" -version = "0.1.0" -description = "" +version = "1.0.0" +description = "Central Differential Privacy and Secure Aggregation in Flower" license = "Apache-2.0" -authors = [ - "The Flower Authors ", +dependencies = [ + "flwr[simulation]>=1.11.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", ] -readme = "README.md" -[tool.poetry.dependencies] -python = "^3.9" -# Mandatory dependencies -flwr = { version = "^1.8.0", extras = ["simulation"] } -flwr-datasets = { version = "0.0.2", extras = ["vision"] } -torch = "2.2.1" -torchvision = "0.17.1" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "fl_dp_sa.server_app:app" +clientapp = "fl_dp_sa.client_app:app" + +[tool.flwr.app.config] +# Parameters for the DP +noise-multiplier = 0.2 +clipping-norm = 10 +num-sampled-clients = 20 +# Parameters for the SecAgg+ protocol +num-shares = 7 +reconstruction-threshold = 4 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 100 \ No newline at end of file diff --git a/examples/fl-dp-sa/requirements.txt b/examples/fl-dp-sa/requirements.txt deleted file mode 100644 index f20b9d71e339..000000000000 --- a/examples/fl-dp-sa/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.8.0 -flwr-datasets[vision]==0.0.2 -torch==2.2.1 -torchvision==0.17.1 diff --git a/examples/flower-secure-aggregation/README.md b/examples/flower-secure-aggregation/README.md index 9e92aed01d9e..0a9056263db3 100644 --- a/examples/flower-secure-aggregation/README.md +++ b/examples/flower-secure-aggregation/README.md @@ -57,7 +57,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.25 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.25" ``` To adapt the example for a practial usage, set `is-demo=false` like shown below. You might want to adjust the `num-shares` and `reconstruction-threshold` settings to suit your requirements. You can override those via `--run-config` as well. 
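For orientation, the sketch below shows how these two settings reach the server-side workflow, using the same numbers as the `fl-dp-sa` example above (hard-coded purely for illustration). Roughly speaking, `num_shares=7` with `reconstruction_threshold=4` means each masked update is split into 7 shares and any 4 of them suffice to reconstruct it, so up to 3 share holders may drop out in a round without breaking aggregation.

```python
from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow

# Minimal sketch: in the packaged examples these values are read from
# [tool.flwr.app.config] via context.run_config; they are hard-coded here
# only to make the relationship between the two knobs visible.
workflow = DefaultWorkflow(
    fit_workflow=SecAggPlusWorkflow(
        num_shares=7,                # each masked update is split into 7 shares
        reconstruction_threshold=4,  # any 4 shares reconstruct it -> tolerates 3 dropouts
    )
)
```

At run time the same keys can be overridden without touching the code, e.g. `flwr run . --run-config "num-shares=7 reconstruction-threshold=4"`.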
diff --git a/examples/flower-secure-aggregation/pyproject.toml b/examples/flower-secure-aggregation/pyproject.toml index d9be719653b0..6ac94253e839 100644 --- a/examples/flower-secure-aggregation/pyproject.toml +++ b/examples/flower-secure-aggregation/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Secure Aggregation in Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/flower-secure-aggregation/secaggexample/server_app.py b/examples/flower-secure-aggregation/secaggexample/server_app.py index 0f1b594317fa..0b95d68e4183 100644 --- a/examples/flower-secure-aggregation/secaggexample/server_app.py +++ b/examples/flower-secure-aggregation/secaggexample/server_app.py @@ -40,6 +40,7 @@ def main(driver: Driver, context: Context) -> None: strategy = FedAvg( # Select all available clients fraction_fit=1.0, + min_fit_clients=5, # Disable evaluation in demo fraction_evaluate=(0.0 if is_demo else context.run_config["fraction-evaluate"]), min_available_clients=5, diff --git a/examples/flowertune-llm/README.md b/examples/flowertune-llm/README.md new file mode 100644 index 000000000000..51cae73ae88a --- /dev/null +++ b/examples/flowertune-llm/README.md @@ -0,0 +1,118 @@ +--- +tags: [llm, nlp, LLama] +dataset: [Alpaca-GPT4] +framework: [PEFT, torch] +--- + +# FlowerTune LLM: Federated LLM Fine-tuning with Flower + +Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable effectiveness in a wide range of areas. +However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. +Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. + +This introductory example conducts federated instruction tuning with pretrained [OpenLLaMA](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. +We implement FlowerTune LLM by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, +which allows users to perform the training on a single GPU. + +## Set up the project + +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flowertune-llm . \ + && rm -rf _tmp \ + && cd flowertune-llm +``` + +This will create a new directory called `flowertune-llm` with the following structure: + +```shell +flowertune-llm +├── flowertune_llm +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── dataset.py # Defines your dataset and tokenizer +│ └── models.py # Defines your models +│ +├── pyproject.toml # Project metadata like dependencies and configs +├── test.py # Test pre-trained model +└── README.md +``` + +### Install dependencies and project + +Install the dependencies defined in `pyproject.toml` as well as the `flowertune_llm` package. 
+ +```bash +pip install -e . +``` + +## Run the project + +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. + +### Run with the Simulation Engine + +```bash +flwr run . +``` + +This command will run FL simulations with a 4-bit [OpenLLaMA 3Bv2](https://huggingface.co/openlm-research/open_llama_3b_v2) model involving 2 clients per rounds for 100 FL rounds. You can override configuration parameters directly from the command line. Below are a few settings you might want to test: + +```bash +# Use OpenLLaMA-7B instead of 3B and 8-bits quantization +flwr run . --run-config "model.name='openlm-research/open_llama_7b_v2' model.quantization=8" + +# Run for 50 rounds but increasing the fraction of clients that participate per round to 25% +flwr run . --run-config "num-server-rounds=50 strategy.fraction-fit=0.25" +``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. + +## Expected results + +![](_static/train_loss_smooth.png) + +As expected, OpenLLaMA-7B model works better than its 3B version with lower training loss. With the hyperparameters tested, the 8-bit model seems to deliver lower training loss for the smaller 3B model compared to its 4-bit version. + +## VRAM consumption + +| Models | 7-billion (8-bit) | 7-billion (4-bit) | 3-billion (8-bit) | 3-billion (4-bit) | +| :----: | :---------------: | :---------------: | :---------------: | :---------------: | +| VRAM | ~22.00 GB | ~16.50 GB | ~13.50 GB | ~10.60 GB | + +We make use of the [bitsandbytes](https://huggingface.co/docs/bitsandbytes/main/en/index) library in conjunction with [PEFT](https://huggingface.co/docs/peft/en/index) to derive LLMs that can be fine-tuned efficiently. +The above table shows the VRAM consumption per client for the different models considered in this example. +You can adjust the CPU/GPU resources you assign to each of the clients based on your device. +For example, it is easy to train 2 concurrent clients on each GPU (24 GB VRAM) if you choose 3-billion (4-bit) model. +Assigning 50% of the GPU's VRAM to each client by setting `options.backend.clientapp-gpus = 0.5` under `[tool.flwr.federations.local-simulation]` in `pyproject.toml`. + +## Test with your Questions + +We provide a script to test your trained model by passing your specified questions. For example: + +```bash +python test.py --peft-path=/path/to/trained-model-dir/ \ + --question="What is the ideal 1-day plan in London?" +``` + +An answer generated from federated trained 7-billion (8-bit) OpenLLaMA model: + +``` +Great choice. +London has so much to offer, and you can really soak up all the sights and sounds in just a single day. +Here's a suggested itinerary for you. +Start your day off with a hearty breakfast at an authentic British diner. +Then head to the iconic Big Ben and the Houses of Parliament to learn about the history of the city. +Next, make your way to Westminster Abbey to see the many historical monuments and memorials. +From there, cross the river Thames to the Tower of London, which is home to the Crown Jewels of England and Scotland. 
+Finally, end your day with a relaxing visit to the London Eye, the tallest Ferris wheel in Europe, for a beautiful view of the city. +``` + +The [`Vicuna`](https://huggingface.co/lmsys/vicuna-13b-v1.1) template we used in this example is for a chat assistant. +The generated answer is expected to be a multi-turn conversations. Feel free to try more interesting questions! diff --git a/examples/llm-flowertune/_static/train_loss_smooth.png b/examples/flowertune-llm/_static/train_loss_smooth.png similarity index 100% rename from examples/llm-flowertune/_static/train_loss_smooth.png rename to examples/flowertune-llm/_static/train_loss_smooth.png diff --git a/examples/flowertune-llm/flowertune_llm/__init__.py b/examples/flowertune-llm/flowertune_llm/__init__.py new file mode 100644 index 000000000000..e786a4d4b73d --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/__init__.py @@ -0,0 +1 @@ +"""flowertune_llm.""" diff --git a/examples/flowertune-llm/flowertune_llm/client_app.py b/examples/flowertune-llm/flowertune_llm/client_app.py new file mode 100644 index 000000000000..b61a733b29cf --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/client_app.py @@ -0,0 +1,126 @@ +"""flowertune-llm: A Flower / FlowerTune app.""" + +import os +import warnings +from typing import Dict, Tuple + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import unflatten_dict +from flwr.common.typing import NDArrays, Scalar +from omegaconf import DictConfig + +from transformers import TrainingArguments +from trl import SFTTrainer + +from flowertune_llm.dataset import ( + get_tokenizer_and_data_collator_and_propt_formatting, + load_data, + replace_keys, +) +from flowertune_llm.models import ( + cosine_annealing, + get_model, + set_parameters, + get_parameters, +) + +# Avoid warnings +os.environ["TOKENIZERS_PARALLELISM"] = "true" +os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" +warnings.filterwarnings("ignore", category=UserWarning) + + +# pylint: disable=too-many-arguments +# pylint: disable=too-many-instance-attributes +class FlowerClient(NumPyClient): + """Standard Flower client for CNN training.""" + + def __init__( + self, + model_cfg: DictConfig, + train_cfg: DictConfig, + trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ): # pylint: disable=too-many-arguments + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.train_cfg = train_cfg + self.training_argumnets = TrainingArguments(**train_cfg.training_arguments) + self.tokenizer = tokenizer + self.formatting_prompts_func = formatting_prompts_func + self.data_collator = data_collator + self.num_rounds = num_rounds + self.trainset = trainset + + # instantiate model + self.model = get_model(model_cfg) + + def fit( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[NDArrays, int, Dict]: + """Implement distributed fit function for a given client.""" + set_parameters(self.model, parameters) + + new_lr = cosine_annealing( + int(config["current_round"]), + self.num_rounds, + self.train_cfg.learning_rate_max, + self.train_cfg.learning_rate_min, + ) + + self.training_argumnets.learning_rate = new_lr + self.training_argumnets.output_dir = config["save_path"] + + # Construct trainer + trainer = SFTTrainer( + model=self.model, + tokenizer=self.tokenizer, + args=self.training_argumnets, + max_seq_length=self.train_cfg.seq_length, + train_dataset=self.trainset, + formatting_func=self.formatting_prompts_func, + 
data_collator=self.data_collator, + ) + + # Do local training + results = trainer.train() + + return ( + get_parameters(self.model), + len(self.trainset), + {"train_loss": results.training_loss}, + ) + + +def client_fn(context: Context) -> FlowerClient: + """Create a Flower client representing a single organization.""" + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Let's get the client partition + client_trainset = load_data(partition_id, num_partitions, cfg.dataset.name) + ( + tokenizer, + data_collator, + formatting_prompts_func, + ) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) + + return FlowerClient( + cfg.model, + cfg.train, + client_trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/examples/llm-flowertune/dataset.py b/examples/flowertune-llm/flowertune_llm/dataset.py similarity index 53% rename from examples/llm-flowertune/dataset.py rename to examples/flowertune-llm/flowertune_llm/dataset.py index 571be31f7fba..87595b3f9ccd 100644 --- a/examples/llm-flowertune/dataset.py +++ b/examples/flowertune-llm/flowertune_llm/dataset.py @@ -1,6 +1,11 @@ from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM +from flwr_datasets.partitioner import IidPartitioner +from flwr_datasets import FederatedDataset + +FDS = None # Cache FederatedDataset + def formatting_prompts_func(example): output_texts = [] @@ -27,3 +32,31 @@ def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str): ) return tokenizer, data_collator, formatting_prompts_func + + +def load_data(partition_id: int, num_partitions: int, dataset_name: str): + """Load partition data.""" + # Only initialize `FederatedDataset` once + global FDS + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset=dataset_name, + partitioners={"train": partitioner}, + ) + client_trainset = FDS.load_partition(partition_id, "train") + client_trainset = client_trainset.rename_column("output", "response") + + return client_trainset + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/examples/llm-flowertune/models.py b/examples/flowertune-llm/flowertune_llm/models.py similarity index 68% rename from examples/llm-flowertune/models.py rename to examples/flowertune-llm/flowertune_llm/models.py index f32c800cf2c1..e1609caeb2fc 100644 --- a/examples/llm-flowertune/models.py +++ b/examples/flowertune-llm/flowertune_llm/models.py @@ -2,10 +2,18 @@ import torch from omegaconf import DictConfig -from peft import LoraConfig, get_peft_model +from collections import OrderedDict +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + set_peft_model_state_dict, +) from peft.utils import prepare_model_for_kbit_training from transformers import AutoModelForCausalLM, BitsAndBytesConfig +from flwr.common.typing import NDArrays + def cosine_annealing( current_round: int, @@ -53,3 +61,17 @@ def 
get_model(model_cfg: DictConfig): ) return get_peft_model(model, peft_config) + + +def set_parameters(model, parameters: NDArrays) -> None: + """Change the parameters of the model using the given ones.""" + peft_state_dict_keys = get_peft_model_state_dict(model).keys() + params_dict = zip(peft_state_dict_keys, parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + set_peft_model_state_dict(model, state_dict) + + +def get_parameters(model) -> NDArrays: + """Return the parameters of the current net.""" + state_dict = get_peft_model_state_dict(model) + return [val.cpu().numpy() for _, val in state_dict.items()] diff --git a/examples/flowertune-llm/flowertune_llm/server_app.py b/examples/flowertune-llm/flowertune_llm/server_app.py new file mode 100644 index 000000000000..ff0da90c8b9b --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/server_app.py @@ -0,0 +1,94 @@ +"""flowertune-llm: A Flower / FlowerTune app.""" + +import os +from datetime import datetime + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.config import unflatten_dict +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from omegaconf import DictConfig + +from flowertune_llm.models import get_model, get_parameters, set_parameters +from flowertune_llm.dataset import replace_keys + + +# Get function that will be executed by the strategy's evaluate() method +# Here we use it to save global model checkpoints +def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): + """Return an evaluation function for saving global model.""" + + def evaluate(server_round: int, parameters, config): + # Save model + if server_round != 0 and ( + server_round == total_round or server_round % save_every_round == 0 + ): + # Init model + model = get_model(model_cfg) + set_parameters(model, parameters) + + model.save_pretrained(f"{save_path}/peft_{server_round}") + + return 0.0, {} + + return evaluate + + +def get_on_fit_config(save_path): + """Return a function that will be used to construct the config that the client's + fit() method will receive.""" + + def fit_config_fn(server_round: int): + fit_config = {} + fit_config["current_round"] = server_round + fit_config["save_path"] = save_path + return fit_config + + return fit_config_fn + + +def fit_weighted_average(metrics): + """Aggregate (federated) evaluation metrics.""" + # Multiply accuracy of each client by number of examples used + losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"train_loss": sum(losses) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Create output directory given current timestamp + current_time = datetime.now() + folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") + save_path = os.path.join(os.getcwd(), f"results/{folder_name}") + os.makedirs(save_path, exist_ok=True) + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Get initial model weights + init_model = get_model(cfg.model) + init_model_parameters = get_parameters(init_model) + init_model_parameters = ndarrays_to_parameters(init_model_parameters) + + # Define strategy + strategy = FedAvg( + fraction_fit=cfg.strategy.fraction_fit, + 
fraction_evaluate=cfg.strategy.fraction_evaluate, + on_fit_config_fn=get_on_fit_config(save_path), + fit_metrics_aggregation_fn=fit_weighted_average, + initial_parameters=init_model_parameters, + evaluate_fn=get_evaluate_fn( + cfg.model, cfg.train.save_every_round, num_rounds, save_path + ), + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Flower ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/flowertune-llm/pyproject.toml b/examples/flowertune-llm/pyproject.toml new file mode 100644 index 000000000000..5c057de2ea70 --- /dev/null +++ b/examples/flowertune-llm/pyproject.toml @@ -0,0 +1,66 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "flowertune-llm" +version = "1.0.0" +description = "FlowerTune LLM: Federated LLM Fine-tuning with Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]==1.11.1", + "flwr-datasets>=0.3.0", + "trl==0.8.1", + "bitsandbytes==0.43.0", + "scipy==1.13.0", + "peft==0.6.2", + "fschat[model_worker,webui]==0.2.35", + "transformers==4.39.3", + "sentencepiece==0.2.0", + "omegaconf==2.3.0", + "hf_transfer==0.1.8", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "flowertune_llm.server_app:app" +clientapp = "flowertune_llm.client_app:app" + +[tool.flwr.app.config] +dataset.name = "vicgalle/alpaca-gpt4" +model.name = "openlm-research/open_llama_3b_v2" +model.quantization = 4 +model.gradient-checkpointing = true +model.lora.peft-lora-r = 32 +model.lora.peft-lora-alpha = 64 +train.save-every-round = 5 +train.learning-rate-max = 5e-5 +train.learning-rate-min = 1e-6 +train.seq-length = 512 +train.training-arguments.output-dir = "" +train.training-arguments.learning-rate = "" +train.training-arguments.per-device-train-batch-size = 16 +train.training-arguments.gradient-accumulation-steps = 1 +train.training-arguments.logging-steps = 10 +train.training-arguments.num-train-epochs = 3 +train.training-arguments.max-steps = 10 +train.training-arguments.save-steps = 1000 +train.training-arguments.save-total-limit = 10 +train.training-arguments.gradient-checkpointing = true +train.training-arguments.lr-scheduler-type = "constant" +strategy.fraction-fit = 0.1 +strategy.fraction-evaluate = 0.0 +num-server-rounds = 100 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 20 +options.backend.client-resources.num-cpus = 8 +options.backend.client-resources.num-gpus = 1.0 diff --git a/examples/llm-flowertune/test.py b/examples/flowertune-llm/test.py similarity index 100% rename from examples/llm-flowertune/test.py rename to examples/flowertune-llm/test.py diff --git a/examples/flowertune-vit/README.md b/examples/flowertune-vit/README.md index 9e2b0fd6b079..48327880f412 100644 --- a/examples/flowertune-vit/README.md +++ b/examples/flowertune-vit/README.md @@ -59,7 +59,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,batch-size=64 +flwr run . --run-config "num-server-rounds=5 batch-size=64" ``` Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 5x`ClientApp` will run in parallel in the available GPU. 
You can tweak the degree of parallelism by adjusting the settings of this federation in the `pyproject.toml`. diff --git a/examples/flowertune-vit/pyproject.toml b/examples/flowertune-vit/pyproject.toml index 0f11dc54c81a..d0feabc14212 100644 --- a/examples/flowertune-vit/pyproject.toml +++ b/examples/flowertune-vit/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Finetuning of a Vision Transformer with Flower" license = "Apache-2.0" dependencies = [ - "flwr-nightly[simulation]==1.11.0.dev20240823", + "flwr[simulation]==1.11.0", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/ios/pyproject.toml b/examples/ios/pyproject.toml index 2e55b14cf761..03ea89ea3e54 100644 --- a/examples/ios/pyproject.toml +++ b/examples/ios/pyproject.toml @@ -9,5 +9,5 @@ description = "Example Server for Flower iOS/CoreML" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" diff --git a/examples/llm-flowertune/README.md b/examples/llm-flowertune/README.md deleted file mode 100644 index 46076e0b2078..000000000000 --- a/examples/llm-flowertune/README.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Federated LLM Fine-tuning with Flower -tags: [llm, nlp, LLama2] -dataset: [Alpaca-GPT4] -framework: [PEFT, torch] ---- - -# LLM FlowerTune: Federated LLM Fine-tuning with Flower - -Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable effectiveness in a wide range of areas. -However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. -Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. - -This introductory example conducts federated instruction tuning with pretrained [LLama2](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. -We implement LLM FlowerTune by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, -which allows users to perform the training on a single GPU. - -## Environment Setup - -Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/llm-flowertune . && rm -rf flower && cd llm-flowertune -``` - -This will create a new directory called `llm-flowertune` containing the following files: - -``` --- README.md <- Your're reading this right now --- main.py <- Start fed-LLM simulation --- client.py <- Flower client constructor --- model.py <- Model build --- dataset.py <- Dataset and tokenizer build --- utils.py <- Utility functions --- test.py <- Test pre-trained model --- app.py <- ServerApp/ClientApp for Flower-Next --- conf/config.yaml <- Configuration file --- requirements.txt <- Example dependencies -``` - -### Installing dependencies - -Project dependencies are defined in `requirements.txt`. 
Install them with: - -```shell -pip install -r requirements.txt -``` - -## Run LLM Fine-tuning - -With an activated Python environment, run the example with default config values. The config is in `conf/config.yaml` and is loaded automatically. - -```bash -# Run with default config -python main.py -``` - -This command will run FL simulations with a 4-bit [OpenLLaMA 7Bv2](https://huggingface.co/openlm-research/open_llama_7b_v2) model involving 2 clients per rounds for 100 FL rounds. You can override configuration parameters directly from the command line. Below are a few settings you might want to test: - -```bash -# Use OpenLLaMA-3B instead of 7B and 8-bits quantization -python main.py model.name="openlm-research/open_llama_3b_v2" model.quantization=8 - -# Run for 50 rounds but increasing the fraction of clients that participate per round to 25% -python main.py num_rounds=50 fraction_fit.fraction_fit=0.25 -``` - -## Expected Results - -![](_static/train_loss_smooth.png) - -As expected, LLama2-7B model works better than its 3B version with lower training loss. With the hyperparameters tested, the 8-bit model seems to deliver lower training loss for the smaller 3B model compared to its 4-bit version. - -You can run all 8 experiments with a single command as: - -```bash -python main.py --multirun model.name="openlm-research/open_llama_7b_v2","openlm-research/open_llama_3b_v2" model.quantization=8,4 strategy.fraction_fit=0.1,0.2 -``` - -## VRAM Consumption - -| Models | 7-billion (8-bit) | 7-billion (4-bit) | 3-billion (8-bit) | 3-billion (4-bit) | -| :----: | :---------------: | :---------------: | :---------------: | :---------------: | -| VRAM | ~22.00 GB | ~16.50 GB | ~13.50 GB | ~10.60 GB | - -We make use of the [bitsandbytes](https://huggingface.co/docs/bitsandbytes/main/en/index) library in conjunction with [PEFT](https://huggingface.co/docs/peft/en/index) to derive LLMs that can be fine-tuned efficiently. -The above table shows the VRAM consumption per client for the different models considered in this example. -You can adjust the CPU/GPU resources you assign to each of the clients based on your device. -For example, it is easy to train 2 concurrent clients on each GPU (24 GB VRAM) if you choose 3-billion (4-bit) model. - -```bash -# This will assign 50% of the GPU's VRAM to each client. -python main.py model.name="openlm-research/open_llama_3b_v2" model.quantization=4 client_resources.num_gpus=0.5 -``` - -## Test with your Questions - -We provide a script to test your trained model by passing your specified questions. For example: - -```bash -python test.py --peft-path=/path/to/trained-model-dir/ \ - --question="What is the ideal 1-day plan in London?" -``` - -An answer generated from federated trained 7-billion (8-bit) LLama2 model: - -``` -Great choice. -London has so much to offer, and you can really soak up all the sights and sounds in just a single day. -Here's a suggested itinerary for you. -Start your day off with a hearty breakfast at an authentic British diner. -Then head to the iconic Big Ben and the Houses of Parliament to learn about the history of the city. -Next, make your way to Westminster Abbey to see the many historical monuments and memorials. -From there, cross the river Thames to the Tower of London, which is home to the Crown Jewels of England and Scotland. -Finally, end your day with a relaxing visit to the London Eye, the tallest Ferris wheel in Europe, for a beautiful view of the city. 
-``` - -The [`Vicuna`](https://huggingface.co/lmsys/vicuna-13b-v1.1) template we used in this example is for a chat assistant. -The generated answer is expected to be a multi-turn conversations. Feel free to try more interesting questions! - -## Run with Flower Next (preview) - -We conduct a 2-client setting to demonstrate how to run federated LLM fine-tuning with Flower Next. -Please follow the steps below: - -1. Start the long-running Flower server (SuperLink) - ```bash - flower-superlink --insecure - ``` -2. Start the long-running Flower client (SuperNode) - ```bash - # In a new terminal window, start the first long-running Flower client: - flower-client-app app:client1 --insecure - ``` - ```bash - # In another new terminal window, start the second long-running Flower client: - flower-client-app app:client2 --insecure - ``` -3. Run the Flower App - ```bash - # With both the long-running server (SuperLink) and two clients (SuperNode) up and running, - # we can now run the actual Flower App: - flower-server-app app:server --insecure - ``` diff --git a/examples/llm-flowertune/app.py b/examples/llm-flowertune/app.py deleted file mode 100644 index db6595c94d31..000000000000 --- a/examples/llm-flowertune/app.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -import warnings - -import flwr as fl -from flwr_datasets import FederatedDataset -from hydra import compose, initialize - -from client import gen_client_fn -from dataset import get_tokenizer_and_data_collator_and_propt_formatting -from utils import fit_weighted_average, get_on_fit_config - -warnings.filterwarnings("ignore", category=UserWarning) - -NUM_ROUNDS = 100 -save_path = "./results/" - -with initialize(config_path="conf"): - cfg = compose(config_name="config") - -# Reset the number of number -cfg.num_rounds = NUM_ROUNDS -cfg.train.num_rounds = NUM_ROUNDS - -# Create output directory -if not os.path.exists(save_path): - os.mkdir(save_path) - -# Partition dataset and get dataloaders -# We set the number of partitions to 20 for fast processing. -fds = FederatedDataset( - dataset=cfg.dataset.name, partitioners={"train": cfg.num_clients} -) -( - tokenizer, - data_collator, - formatting_prompts_func, -) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) - - -# ClientApp for client #1 (Flower Next) -client1 = fl.client.ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - partition_id=0, - api=True, - ), -) - - -# ClientApp for client #2 (Flower Next) -client2 = fl.client.ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - partition_id=1, - api=True, - ), -) - - -# Instantiate strategy. 
-strategy = fl.server.strategy.FedAvg( - min_available_clients=2, # Simulate a 2-client setting - fraction_fit=1.0, - fraction_evaluate=0.0, # no client evaluation - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, -) diff --git a/examples/llm-flowertune/client.py b/examples/llm-flowertune/client.py deleted file mode 100644 index c81333f664b3..000000000000 --- a/examples/llm-flowertune/client.py +++ /dev/null @@ -1,129 +0,0 @@ -from collections import OrderedDict -from typing import Callable, Dict, Tuple - -import flwr as fl -import torch -from flwr.common.typing import NDArrays, Scalar -from omegaconf import DictConfig -from peft import get_peft_model_state_dict, set_peft_model_state_dict -from transformers import TrainingArguments -from trl import SFTTrainer - -from models import cosine_annealing, get_model - - -# pylint: disable=too-many-arguments -class FlowerClient( - fl.client.NumPyClient -): # pylint: disable=too-many-instance-attributes - """Standard Flower client for CNN training.""" - - def __init__( - self, - model_cfg: DictConfig, - train_cfg: DictConfig, - trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ): # pylint: disable=too-many-arguments - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.train_cfg = train_cfg - self.training_argumnets = TrainingArguments(**train_cfg.training_arguments) - self.tokenizer = tokenizer - self.formatting_prompts_func = formatting_prompts_func - self.data_collator = data_collator - self.save_path = save_path - - # instantiate model - self.model = get_model(model_cfg) - - self.trainset = trainset - - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: - """Return the parameters of the current net.""" - - state_dict = get_peft_model_state_dict(self.model) - return [val.cpu().numpy() for _, val in state_dict.items()] - - def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict]: - """Implement distributed fit function for a given client.""" - set_parameters(self.model, parameters) - - new_lr = cosine_annealing( - int(config["current_round"]), - self.train_cfg.num_rounds, - self.train_cfg.learning_rate_max, - self.train_cfg.learning_rate_min, - ) - - self.training_argumnets.learning_rate = new_lr - self.training_argumnets.output_dir = self.save_path - - # Construct trainer - trainer = SFTTrainer( - model=self.model, - tokenizer=self.tokenizer, - args=self.training_argumnets, - max_seq_length=self.train_cfg.seq_length, - train_dataset=self.trainset, - formatting_func=self.formatting_prompts_func, - data_collator=self.data_collator, - ) - - # Do local training - results = trainer.train() - - return ( - self.get_parameters({}), - len(self.trainset), - {"train_loss": results.training_loss}, - ) - - -def set_parameters(model, parameters: NDArrays) -> None: - """Change the parameters of the model using the given ones.""" - peft_state_dict_keys = get_peft_model_state_dict(model).keys() - params_dict = zip(peft_state_dict_keys, parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - set_peft_model_state_dict(model, state_dict) - - -def gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - model_cfg: DictConfig, - train_cfg: DictConfig, - save_path: str, - partition_id: int = 0, - api: bool = False, -) 
-> Callable[[str], FlowerClient]: # pylint: disable=too-many-arguments - """Generate the client function that creates the Flower Clients.""" - - def client_fn(cid: str) -> FlowerClient: - """Create a Flower client representing a single organization.""" - - # Let's get the partition corresponding to the i-th client - client_trainset = ( - fds.load_partition(partition_id, "train") - if api - else fds.load_partition(int(cid), "train") - ) - client_trainset = client_trainset.rename_column("output", "response") - - return FlowerClient( - model_cfg, - train_cfg, - client_trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ).to_client() - - return client_fn diff --git a/examples/llm-flowertune/conf/config.yaml b/examples/llm-flowertune/conf/config.yaml deleted file mode 100644 index 0b769d351479..000000000000 --- a/examples/llm-flowertune/conf/config.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Federated Instruction Tuning on General Dataset ---- - -num_clients: 20 # total number of clients -num_rounds: 100 - -dataset: - name: "vicgalle/alpaca-gpt4" - -model: - name: "openlm-research/open_llama_7b_v2" - quantization: 4 # 8 or 4 if you want to do quantization with BitsAndBytes - gradient_checkpointing: True - lora: - peft_lora_r: 32 - peft_lora_alpha: 64 - -train: - num_rounds: ${num_rounds} - save_every_round: 5 - learning_rate_max: 5e-5 - learning_rate_min: 1e-6 - seq_length: 512 - training_arguments: - output_dir: null # to be set by hydra - learning_rate: null # to be set by the client - per_device_train_batch_size: 16 - gradient_accumulation_steps: 1 - logging_steps: 10 - num_train_epochs: 3 - max_steps: 10 - report_to: null - save_steps: 1000 - save_total_limit: 10 - gradient_checkpointing: ${model.gradient_checkpointing} - lr_scheduler_type: "constant" - -strategy: - _target_: flwr.server.strategy.FedAvg - fraction_fit: 0.1 # sample 10% of clients (i.e. 2 per round) - fraction_evaluate: 0.0 # no client evaluation - -client_resources: - num_cpus: 8 - num_gpus: 1.0 diff --git a/examples/llm-flowertune/main.py b/examples/llm-flowertune/main.py deleted file mode 100644 index ec8308601efb..000000000000 --- a/examples/llm-flowertune/main.py +++ /dev/null @@ -1,92 +0,0 @@ -import pickle -import warnings - -import flwr as fl -import hydra -from flwr_datasets import FederatedDataset -from hydra.core.hydra_config import HydraConfig -from hydra.utils import instantiate -from omegaconf import DictConfig, OmegaConf - -from client import gen_client_fn -from dataset import get_tokenizer_and_data_collator_and_propt_formatting -from utils import fit_weighted_average, get_evaluate_fn, get_on_fit_config - -warnings.filterwarnings("ignore", category=UserWarning) - - -@hydra.main(config_path="conf", config_name="config", version_base=None) -def main(cfg: DictConfig) -> None: - """Run federated LLM fine-tuning. - - Parameters - ---------- - cfg : DictConfig - An omegaconf object that stores the hydra config. 
- """ - # Print config structured as YAML - print(OmegaConf.to_yaml(cfg)) - - # Partition dataset and get dataloaders - fds = FederatedDataset( - dataset=cfg.dataset.name, partitioners={"train": cfg.num_clients} - ) - ( - tokenizer, - data_collator, - formatting_prompts_func, - ) = get_tokenizer_and_data_collator_and_propt_formatting( - cfg.model.name, - ) - - # Hydra automatically creates an output directory - # Let's retrieve it and save some results there - save_path = HydraConfig.get().runtime.output_dir - - # Prepare function that will be used to spawn each client - client_fn = gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - ) - - # Instantiate strategy according to config. Here we pass other arguments - # that are only defined at run time. - strategy = instantiate( - cfg.strategy, - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, - evaluate_fn=get_evaluate_fn( - cfg.model, cfg.train.save_every_round, cfg.num_rounds, save_path - ), - ) - - # Start simulation - history = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=cfg.num_clients, - config=fl.server.ServerConfig(num_rounds=cfg.num_rounds), - client_resources={ - "num_cpus": cfg.client_resources.num_cpus, - "num_gpus": cfg.client_resources.num_gpus, - }, - strategy=strategy, - ) - - # Experiment completed. Now we save the results and - # generate plots using the `history` - print("................") - print(history) - - # Save results as a Python pickle using a file_path - # the directory created by Hydra for each run - with open(f"{save_path}/results.pkl", "wb") as f: - pickle.dump(history, f) - - -if __name__ == "__main__": - main() diff --git a/examples/llm-flowertune/requirements.txt b/examples/llm-flowertune/requirements.txt deleted file mode 100644 index 2d0e65da3615..000000000000 --- a/examples/llm-flowertune/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -flwr[rest,simulation]>=1.8.0, <2.0 -flwr-datasets>=0.0.2 -hydra-core==1.3.2 -trl==0.7.2 -bitsandbytes==0.41.3 -scipy==1.11.2 -peft==0.4.0 -fschat[model_worker,webui]==0.2.35 -transformers==4.38.1 -hf_transfer==0.1.8 diff --git a/examples/llm-flowertune/utils.py b/examples/llm-flowertune/utils.py deleted file mode 100644 index bbb607810537..000000000000 --- a/examples/llm-flowertune/utils.py +++ /dev/null @@ -1,43 +0,0 @@ -from client import set_parameters -from models import get_model - - -# Get function that will be executed by the strategy's evaluate() method -# Here we use it to save global model checkpoints -def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): - """Return an evaluation function for saving global model.""" - - def evaluate(server_round: int, parameters, config): - # Save model - if server_round != 0 and ( - server_round == total_round or server_round % save_every_round == 0 - ): - # Init model - model = get_model(model_cfg) - set_parameters(model, parameters) - - model.save_pretrained(f"{save_path}/peft_{server_round}") - - return 0.0, {} - - return evaluate - - -# Get a function that will be used to construct the config that the client's -# fit() method will receive -def get_on_fit_config(): - def fit_config_fn(server_round: int): - fit_config = {"current_round": server_round} - return fit_config - - return fit_config_fn - - -def fit_weighted_average(metrics): - """Aggregation function for (federated) evaluation metrics.""" - # Multiply accuracy of each client by number of examples used - losses = 
[num_examples * m["train_loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"train_loss": sum(losses) / sum(examples)} diff --git a/examples/pytorch-from-centralized-to-federated/pyproject.toml b/examples/pytorch-from-centralized-to-federated/pyproject.toml index 3d1559e3a515..57a8082fd6bf 100644 --- a/examples/pytorch-from-centralized-to-federated/pyproject.toml +++ b/examples/pytorch-from-centralized-to-federated/pyproject.toml @@ -9,7 +9,7 @@ description = "PyTorch: From Centralized To Federated with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } torch = "1.13.1" diff --git a/examples/quickstart-fastai/pyproject.toml b/examples/quickstart-fastai/pyproject.toml index 4d160bae0eec..25219ffcac4c 100644 --- a/examples/quickstart-fastai/pyproject.toml +++ b/examples/quickstart-fastai/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with Fastai and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets[vision]>=0.3.0", "fastai==2.7.14", "torch==2.2.0", diff --git a/examples/quickstart-huggingface/README.md b/examples/quickstart-huggingface/README.md index ac0acebb9b99..124689441656 100644 --- a/examples/quickstart-huggingface/README.md +++ b/examples/quickstart-huggingface/README.md @@ -8,7 +8,7 @@ framework: [transformers] This introductory example to using [🤗Transformers](https://huggingface.co/docs/transformers/en/index) with Flower. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for a detailed explanation of the transformer pipeline. -In this example, we will federated the training of a [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) modle on the [IMDB](https://huggingface.co/datasets/stanfordnlp/imdb) dataset. The data will be downloaded and partitioned using [Flower Datasets](https://flower.ai/docs/datasets/). This example runs best when a GPU is available. +In this example, we will federated the training of a [BERT-tiny](https://huggingface.co/prajjwal1/bert-tiny) modle on the [IMDB](https://huggingface.co/datasets/stanfordnlp/imdb) dataset. The data will be downloaded and partitioned using [Flower Datasets](https://flower.ai/docs/datasets/). This example runs best when a GPU is available. ## Set up the project @@ -57,7 +57,7 @@ You can run your Flower project in both _simulation_ and _deployment_ mode witho flwr run . ``` -Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 1x`ClientApp` (using ~12 GB of VRAM) will run in parallel in each available GPU. Note you can adjust the degree of paralellism but modifying the `client-resources` specification. +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 4x`ClientApp` (using ~1 GB of VRAM each) will run in parallel in each available GPU. Note you can adjust the degree of paralellism but modifying the `client-resources` specification. ```bash # Run with the `local-simulation-gpu` federation @@ -67,7 +67,7 @@ flwr run . 
local-simulation-gpu You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example ```bash -flwr run --run-config num-server-rounds=5 +flwr run --run-config "num-server-rounds=5 fraction-fit=0.1" ``` > \[!TIP\] diff --git a/examples/quickstart-huggingface/huggingface_example/task.py b/examples/quickstart-huggingface/huggingface_example/task.py index 25304d134a67..1c5b8d087dca 100644 --- a/examples/quickstart-huggingface/huggingface_example/task.py +++ b/examples/quickstart-huggingface/huggingface_example/task.py @@ -40,7 +40,7 @@ def load_data( tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=512) def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) + return tokenizer(examples["text"], truncation=True, add_special_tokens=True) partition_train_test = partition_train_test.map(tokenize_function, batched=True) partition_train_test = partition_train_test.remove_columns("text") diff --git a/examples/quickstart-huggingface/pyproject.toml b/examples/quickstart-huggingface/pyproject.toml index af48b2429635..f479acfa0918 100644 --- a/examples/quickstart-huggingface/pyproject.toml +++ b/examples/quickstart-huggingface/pyproject.toml @@ -12,7 +12,7 @@ authors = [ { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] dependencies = [ - "flwr-nightly[simulation]==1.11.0.dev20240823", + "flwr[simulation]==1.11.0", "flwr-datasets>=0.3.0", "torch==2.4.0", "transformers>=4.30.0,<5.0", @@ -33,7 +33,7 @@ clientapp = "huggingface_example.client_app:app" [tool.flwr.app.config] num-server-rounds = 3 -model-name = "distilbert-base-uncased" +model-name = "prajjwal1/bert-tiny" fraction-fit = 0.05 fraction-evaluate = 0.1 @@ -46,4 +46,4 @@ options.num-supernodes = 100 [tool.flwr.federations.local-simulation-gpu] options.num-supernodes = 100 options.backend.client-resources.num-cpus = 4 # each ClientApp assumes to use 4CPUs -options.backend.client-resources.num-gpus = 1.0 # at most 1 ClientApp will run in a given GPU (lower it to increase parallelism) \ No newline at end of file +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApp will run in a given GPU (lower it to increase parallelism) diff --git a/examples/quickstart-jax/pyproject.toml b/examples/quickstart-jax/pyproject.toml index c956191369b5..68a3455aedee 100644 --- a/examples/quickstart-jax/pyproject.toml +++ b/examples/quickstart-jax/pyproject.toml @@ -5,7 +5,7 @@ description = "JAX example training a linear regression model with federated lea authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = "1.0.0" jax = "0.4.17" jaxlib = "0.4.17" diff --git a/examples/quickstart-mlcube/pyproject.toml b/examples/quickstart-mlcube/pyproject.toml index a2862bd5ebb7..f790a596ed19 100644 --- a/examples/quickstart-mlcube/pyproject.toml +++ b/examples/quickstart-mlcube/pyproject.toml @@ -9,7 +9,7 @@ description = "Keras Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" # For development: { path = "../../", develop = true } tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } diff --git a/examples/quickstart-mlx/README.md b/examples/quickstart-mlx/README.md index 
95b9ccf605b5..5914ce5f31dd 100644 --- a/examples/quickstart-mlx/README.md +++ b/examples/quickstart-mlx/README.md @@ -58,7 +58,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` > \[!TIP\] @@ -67,4 +67,4 @@ flwr run . --run-config num-server-rounds=5,learning-rate=0.05 ### Run with the Deployment Engine > \[!NOTE\] -> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates. diff --git a/examples/quickstart-mlx/pyproject.toml b/examples/quickstart-mlx/pyproject.toml index 36e39bcd6d78..459cac86f5d6 100644 --- a/examples/quickstart-mlx/pyproject.toml +++ b/examples/quickstart-mlx/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with MLX and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets[vision]>=0.3.0", "mlx==0.16.0", "numpy==1.26.4", diff --git a/examples/quickstart-monai/README.md b/examples/quickstart-monai/README.md index c470a6a6c86f..8189a8e98406 100644 --- a/examples/quickstart-monai/README.md +++ b/examples/quickstart-monai/README.md @@ -70,7 +70,7 @@ flwr run . local-simulation-gpu You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,batch-size=32 +flwr run . --run-config "num-server-rounds=5 batch-size=32" ``` ### Run with the Deployment Engine diff --git a/examples/quickstart-monai/monaiexample/task.py b/examples/quickstart-monai/monaiexample/task.py index 09597562a1f2..4f7972d455fd 100644 --- a/examples/quickstart-monai/monaiexample/task.py +++ b/examples/quickstart-monai/monaiexample/task.py @@ -189,9 +189,10 @@ def _download_and_extract_if_needed(url, dest_folder): # Download the tar.gz file tar_gz_filename = url.split("/")[-1] if not os.path.isfile(tar_gz_filename): - with request.urlopen(url) as response, open( - tar_gz_filename, "wb" - ) as out_file: + with ( + request.urlopen(url) as response, + open(tar_gz_filename, "wb") as out_file, + ): out_file.write(response.read()) # Extract the tar.gz file diff --git a/examples/quickstart-monai/pyproject.toml b/examples/quickstart-monai/pyproject.toml index 6ecf5011d24f..daa92fc0387d 100644 --- a/examples/quickstart-monai/pyproject.toml +++ b/examples/quickstart-monai/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with MONAI and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr-nightly[simulation]==1.11.0.dev20240823", + "flwr[simulation]==1.11.0", "flwr-datasets[vision]>=0.3.0", "monai==1.3.2", "filelock==3.15.4", diff --git a/examples/quickstart-pandas/README.md b/examples/quickstart-pandas/README.md index 0b4b3a6ac78a..3f522b26834d 100644 --- a/examples/quickstart-pandas/README.md +++ b/examples/quickstart-pandas/README.md @@ -4,85 +4,69 @@ dataset: [Iris] framework: [pandas] --- -# Flower Example using Pandas +# Federated Learning with Pandas and Flower (Quickstart Example) -This introductory example to Flower uses Pandas, but deep knowledge of Pandas is not necessarily required to run the example. 
However, it will help you understand how to adapt Flower to your use case. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to -download, partition and preprocess the dataset. +> \[!CAUTION\] +> This example uses Flower's low-level API, which remains a preview feature and is subject to change. Both `ClientApp` and `ServerApp` operate directly on [Message](https://flower.ai/docs/framework/ref-api/flwr.common.Message.html) and [RecordSet](https://flower.ai/docs/framework/ref-api/flwr.common.RecordSet.html) objects. + +This introductory example to Flower uses [Pandas](https://pandas.pydata.org/), but deep knowledge of Pandas is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to +download, partition and preprocess the [Iris dataset](https://huggingface.co/datasets/scikit-learn/iris). Running this example in itself is quite easy. -## Project Setup +This example implements a form of Federated Analytics: instead of training a model on locally available data, the nodes run a query on the data they own. In this example, the query computes a histogram over specific columns of the dataset. These metrics are sent to the `ServerApp` for aggregation. -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +## Set up the project -```shell -$ git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/quickstart-pandas . && rm -rf _tmp && cd quickstart-pandas -``` +### Clone the project -This will create a new directory called `quickstart-pandas` containing the following files: +Start by cloning the example project. ```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- start.sh --- README.md +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pandas . \ + && rm -rf _tmp && cd quickstart-pandas ``` -If you don't plan on using the `run.sh` script that automates the run, you should first download the data and put it in a `data` folder, this can be done by executing: +This will create a new directory called `quickstart-pandas` with the following structure: ```shell -$ mkdir -p ./data -$ python -c "from sklearn.datasets import load_iris; load_iris(as_frame=True)['data'].to_csv('./data/client.csv')" +quickstart-pandas +├── pandas_example +│   ├── __init__.py +│   ├── client_app.py # Defines your ClientApp +│   └── server_app.py # Defines your ServerApp +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies - -Project dependencies (such as `pandas` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +### Install dependencies and project -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment.
To verify that everything works correctly you can run the following command: +Install the dependencies defined in `pyproject.toml` as well as the `pandas_example` package. -```shell -poetry run python3 -c "import flwr" +```bash +pip install -e . ``` -If you don't see any errors you're good to go! +## Run the project -#### pip +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +### Run with the Simulation Engine -```shell -pip install -r requirements.txt +```bash +flwr run . ``` -## Run Federated Analytics with Pandas and Flower +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -$ python3 server.py +```bash +flwr run . --run-config num-server-rounds=5 ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. - -Start client 1 in the first terminal: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart Pandas tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pandas.html) -```shell -$ python3 client.py --partition-id 0 -``` - -Start client 2 in the second terminal: - -```shell -$ python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that the server is printing aggregated statistics about the dataset distributed amongst clients. Have a look to the [Flower Quickstarter documentation](https://flower.ai/docs/quickstart-pandas.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker.
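To make the run-config override shown in the new README above more concrete, here is a minimal sketch (not part of the patch itself) that overrides both keys the example's `pyproject.toml` declares under `[tool.flwr.app.config]`, namely `num-server-rounds` and `fraction-sample`, using the quoted, space-separated `--run-config` syntax adopted by the other READMEs in this changeset; the values are illustrative only:

```bash
# Sketch: override both config keys declared in the quickstart-pandas
# pyproject.toml; the values are illustrative, and the quoted space-separated
# `--run-config` syntax follows the other updated READMEs in this changeset.
flwr run . --run-config "num-server-rounds=5 fraction-sample=0.5"
```

Lowering `fraction-sample` below `1.0` makes the `ServerApp` in `server_app.py` query only that fraction of the connected nodes in each round.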
diff --git a/examples/quickstart-pandas/client.py b/examples/quickstart-pandas/client.py deleted file mode 100644 index 5a501e3517e6..000000000000 --- a/examples/quickstart-pandas/client.py +++ /dev/null @@ -1,62 +0,0 @@ -import argparse -from typing import Dict, List, Tuple - -import flwr as fl -import numpy as np -import pandas as pd -from flwr_datasets import FederatedDataset - -column_names = ["sepal_length", "sepal_width"] - - -def compute_hist(df: pd.DataFrame, col_name: str) -> np.ndarray: - freqs, _ = np.histogram(df[col_name]) - return freqs - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def __init__(self, X: pd.DataFrame): - self.X = X - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[List[np.ndarray], int, Dict]: - hist_list = [] - # Execute query locally - for c in self.X.columns: - hist = compute_hist(self.X, c) - hist_list.append(hist) - return ( - hist_list, - len(self.X), - {}, - ) - - -if __name__ == "__main__": - N_CLIENTS = 2 - - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - type=int, - choices=range(0, N_CLIENTS), - required=True, - help="Specifies the partition id of artificially partitioned datasets.", - ) - args = parser.parse_args() - partition_id = args.partition_id - - # Load the partition data - fds = FederatedDataset(dataset="hitorilabs/iris", partitioners={"train": N_CLIENTS}) - - dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] - # Use just the specified columns - X = dataset[column_names] - - # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient(X).to_client(), - ) diff --git a/examples/quickstart-pandas/pandas_example/__init__.py b/examples/quickstart-pandas/pandas_example/__init__.py new file mode 100644 index 000000000000..9e5b1a942dd8 --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/__init__.py @@ -0,0 +1 @@ +"""pandas_example: A Flower / Pandas app.""" diff --git a/examples/quickstart-pandas/pandas_example/client_app.py b/examples/quickstart-pandas/pandas_example/client_app.py new file mode 100644 index 000000000000..0194b0dadf3a --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/client_app.py @@ -0,0 +1,59 @@ +"""pandas_example: A Flower / Pandas app.""" + +import warnings + +import numpy as np +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +from flwr.client import ClientApp +from flwr.common import Context, Message, MetricsRecord, RecordSet + +fds = None # Cache FederatedDataset + +warnings.filterwarnings("ignore", category=UserWarning) + + +def get_clientapp_dataset(partition_id: int, num_partitions: int): + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="scikit-learn/iris", + partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] + # Use just the specified columns + return dataset[["SepalLengthCm", "SepalWidthCm"]] + + +# Flower ClientApp +app = ClientApp() + + +@app.query() +def query(msg: Message, context: Context): + """Construct histogram of local dataset and report to `ServerApp`.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + dataset = 
get_clientapp_dataset(partition_id, num_partitions) + + metrics = {} + # Compute some statistics for each column in the dataframe + for feature_name in dataset.columns: + # Compute histogram + freqs, _ = np.histogram(dataset[feature_name], bins=np.linspace(2.0, 10.0, 10)) + metrics[feature_name] = freqs.tolist() + + # Report the weighted sum (local mean * sample count) and the sample count + metrics[f"{feature_name}_avg"] = dataset[feature_name].mean() * len(dataset) + metrics[f"{feature_name}_count"] = len(dataset) + + reply_content = RecordSet(metrics_records={"query_results": MetricsRecord(metrics)}) + + return msg.create_reply(reply_content) diff --git a/examples/quickstart-pandas/pandas_example/server_app.py b/examples/quickstart-pandas/pandas_example/server_app.py new file mode 100644 index 000000000000..95384c3fa978 --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/server_app.py @@ -0,0 +1,87 @@ +"""pandas_example: A Flower / Pandas app.""" + +import random +import time +from logging import INFO + +import numpy as np + +from flwr.common import Context, MessageType, RecordSet, Message +from flwr.common.logger import log +from flwr.server import Driver, ServerApp + +app = ServerApp() + + +@app.main() +def main(driver: Driver, context: Context) -> None: + """This `ServerApp` constructs a histogram from the partial histograms reported by the + `ClientApp`s.""" + + num_rounds = context.run_config["num-server-rounds"] + min_nodes = 2 + fraction_sample = context.run_config["fraction-sample"] + + for server_round in range(num_rounds): + log(INFO, "") # Add newline for log readability + log(INFO, "Starting round %s/%s", server_round + 1, num_rounds) + + # Loop and wait until enough nodes are available. + all_node_ids = [] + while len(all_node_ids) < min_nodes: + all_node_ids = driver.get_node_ids() + if len(all_node_ids) >= min_nodes: + # Sample nodes + num_to_sample = int(len(all_node_ids) * fraction_sample) + node_ids = random.sample(all_node_ids, num_to_sample) + break + log(INFO, "Waiting for nodes to connect...") + time.sleep(2) + + log(INFO, "Sampled %s nodes (out of %s)", len(node_ids), len(all_node_ids)) + + # Create messages + recordset = RecordSet() + messages = [] + for node_id in node_ids: # one message for each node + message = driver.create_message( + content=recordset, + message_type=MessageType.QUERY, # target `query` method in ClientApp + dst_node_id=node_id, + group_id=str(server_round), + ) + messages.append(message) + + # Send messages and wait for all results + replies = driver.send_and_receive(messages) + log(INFO, "Received %s/%s results", len(replies), len(messages)) + + # Aggregate partial histograms + aggregated_hist = aggregate_partial_histograms(replies) + + # Display aggregated histogram + log(INFO, "Aggregated histogram: %s", aggregated_hist) + + +def aggregate_partial_histograms(messages: list[Message]): + """Aggregate partial histograms.""" + + aggregated_hist = {} + total_count = 0 + for rep in messages: + if rep.has_error(): + continue + query_results = rep.content.metrics_records["query_results"] + # Sum metrics + for k, v in query_results.items(): + if k in ["SepalLengthCm", "SepalWidthCm"]: + if k in aggregated_hist: + aggregated_hist[k] += np.array(v) + else: + aggregated_hist[k] = np.array(v) + if "_count" in k: + total_count += v + + # Verify aggregated histogram adds up to total reported count + assert total_count == sum([sum(v) for v in aggregated_hist.values()]) + return aggregated_hist diff --git a/examples/quickstart-pandas/pyproject.toml b/examples/quickstart-pandas/pyproject.toml index
2e6b1424bb54..7df8ab86cb0c 100644 --- a/examples/quickstart-pandas/pyproject.toml +++ b/examples/quickstart-pandas/pyproject.toml @@ -1,17 +1,39 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-pandas" -version = "0.1.0" -description = "Pandas Federated Analytics Quickstart with Flower" -authors = ["Ragy Haddad "] -maintainers = ["The Flower Authors "] +[project] +name = "pandas_example" +version = "1.0.0" +description = "Federated Learning with Pandas and Flower (Quickstart Example)" +license = "Apache-2.0" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Ragy Haddad", email = "ragy202@gmail.com" }, +] +dependencies = [ + "flwr[simulation]>=1.11.1", + "flwr-datasets[vision]>=0.3.0", + "numpy==1.24.4", + "pandas==2.0.0", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -numpy = "1.23.2" -pandas = "2.0.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pandas_example.server_app:app" +clientapp = "pandas_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-sample = 1.0 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 4 diff --git a/examples/quickstart-pandas/requirements.txt b/examples/quickstart-pandas/requirements.txt deleted file mode 100644 index d44a3c6adab9..000000000000 --- a/examples/quickstart-pandas/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -numpy==1.23.2 -pandas==2.0.0 diff --git a/examples/quickstart-pandas/run.sh b/examples/quickstart-pandas/run.sh deleted file mode 100755 index 2ae1e582b8cf..000000000000 --- a/examples/quickstart-pandas/run.sh +++ /dev/null @@ -1,13 +0,0 @@ -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id ${i} & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-pandas/server.py b/examples/quickstart-pandas/server.py deleted file mode 100644 index 76cbd6194579..000000000000 --- a/examples/quickstart-pandas/server.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Dict, List, Optional, Tuple, Union - -import flwr as fl -import numpy as np -from flwr.common import ( - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - Parameters, - Scalar, - ndarrays_to_parameters, - parameters_to_ndarrays, -) -from flwr.server.client_manager import ClientManager -from flwr.server.client_proxy import ClientProxy -from flwr.server.strategy import Strategy - - -class FedAnalytics(Strategy): - def initialize_parameters( - self, client_manager: Optional[ClientManager] = None - ) -> Optional[Parameters]: - return None - - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - config = {} - fit_ins = FitIns(parameters, config) - clients = client_manager.sample(num_clients=2, min_num_clients=2) - return [(client, fit_ins) for client in clients] - - def 
aggregate_fit( - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - # Get results from fit - # Convert results - values_aggregated = [ - (parameters_to_ndarrays(fit_res.parameters)) for _, fit_res in results - ] - length_agg_hist = 0 - width_agg_hist = 0 - for val in values_aggregated: - length_agg_hist += val[0] - width_agg_hist += val[1] - - ndarr = np.concatenate( - (["Length:"], length_agg_hist, ["Width:"], width_agg_hist) - ) - return ndarrays_to_parameters(ndarr), {} - - def evaluate( - self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: - agg_hist = [arr.item() for arr in parameters_to_ndarrays(parameters)] - return 0, {"Aggregated histograms": agg_hist} - - def configure_evaluate( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: - pass - - def aggregate_evaluate( - self, - server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: - pass - - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=1), - strategy=FedAnalytics(), -) diff --git a/examples/quickstart-pytorch-lightning/README.md b/examples/quickstart-pytorch-lightning/README.md index e520be856962..0aa34db9af75 100644 --- a/examples/quickstart-pytorch-lightning/README.md +++ b/examples/quickstart-pytorch-lightning/README.md @@ -52,7 +52,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,max-epochs=2 +flwr run . --run-config "num-server-rounds=5 max-epochs=2" ``` ### Run with the Deployment Engine diff --git a/examples/quickstart-pytorch-lightning/pyproject.toml b/examples/quickstart-pytorch-lightning/pyproject.toml index 482fc1356527..c5537ac6fcbe 100644 --- a/examples/quickstart-pytorch-lightning/pyproject.toml +++ b/examples/quickstart-pytorch-lightning/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with PyTorch Lightning and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets[vision]>=0.3.0", "pytorch-lightning<2.0.0; sys_platform == 'darwin'", "pytorch-lightning==1.6.0; sys_platform != 'darwin'", diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md index e37d49194b01..d07f83a7ea85 100644 --- a/examples/quickstart-pytorch/README.md +++ b/examples/quickstart-pytorch/README.md @@ -55,7 +55,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . 
--run-config "num-server-rounds=5 learning-rate=0.05" ``` > \[!TIP\] diff --git a/examples/quickstart-pytorch/pyproject.toml b/examples/quickstart-pytorch/pyproject.toml index 29414962ba6b..98f02626a429 100644 --- a/examples/quickstart-pytorch/pyproject.toml +++ b/examples/quickstart-pytorch/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with PyTorch and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/quickstart-tabnet/pyproject.toml b/examples/quickstart-tabnet/pyproject.toml index 6b7311f068f0..8345d6bd3da2 100644 --- a/examples/quickstart-tabnet/pyproject.toml +++ b/examples/quickstart-tabnet/pyproject.toml @@ -9,7 +9,7 @@ description = "Tabnet Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } diff --git a/examples/quickstart-tensorflow/README.md b/examples/quickstart-tensorflow/README.md index f1fa12a3393c..a162e756d799 100644 --- a/examples/quickstart-tensorflow/README.md +++ b/examples/quickstart-tensorflow/README.md @@ -56,7 +56,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` > \[!TIP\] diff --git a/examples/quickstart-tensorflow/tfexample/client_app.py b/examples/quickstart-tensorflow/tfexample/client_app.py index 05bf15e074c2..fcea79ba7391 100644 --- a/examples/quickstart-tensorflow/tfexample/client_app.py +++ b/examples/quickstart-tensorflow/tfexample/client_app.py @@ -21,10 +21,6 @@ def __init__( self.batch_size = batch_size self.verbose = verbose - def get_parameters(self, config): - """Return the parameters of the model of this client.""" - return self.model.get_weights() - def fit(self, parameters, config): """Train the model with data of this client.""" self.model.set_weights(parameters) diff --git a/examples/quickstart-tensorflow/tfexample/server_app.py b/examples/quickstart-tensorflow/tfexample/server_app.py index 053e92588e67..a09ceccfb3f2 100644 --- a/examples/quickstart-tensorflow/tfexample/server_app.py +++ b/examples/quickstart-tensorflow/tfexample/server_app.py @@ -22,7 +22,6 @@ def server_fn(context: Context): """Construct components that set the ServerApp behaviour.""" # Let's define the global model and pass it to the strategy - # Note this is optional. parameters = ndarrays_to_parameters(load_model().get_weights()) # Define the strategy diff --git a/examples/simulation-pytorch/README.md b/examples/simulation-pytorch/README.md deleted file mode 100644 index 2dbfbc849ab7..000000000000 --- a/examples/simulation-pytorch/README.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -tags: [basic, vision, fds, simulation] -dataset: [MNIST] -framework: [torch, torchvision] ---- - -# Flower Simulation example using PyTorch - -This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. 
Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. - -## Running the example (via Jupyter Notebook) - -Run the example on Google Colab: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/examples/simulation-pytorch/sim.ipynb) - -Alternatively, you can run `sim.ipynb` locally or in any other Jupyter environment. - -## Running the example - -Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/simulation-pytorch . && rm -rf flower && cd simulation-pytorch -``` - -This will create a new directory called `simulation-pytorch` containing the following files: - -``` --- README.md <- Your're reading this right now --- sim.ipynb <- Example notebook --- sim.py <- Example code --- utils.py <- auxiliary functions for this example --- pyproject.toml <- Example dependencies --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -### Run with `start_simulation()` - -Ensure you have activated your environment then: - -```bash -# and then run the example -python sim.py -``` - -You can adjust the CPU/GPU resources you assign to each of your virtual clients. By default, your clients will only use 1xCPU core. For example: - -```bash -# Will assign 2xCPUs to each client -python sim.py --num_cpus=2 - -# Will assign 2xCPUs and 25% of the GPU's VRAM to each client -# This means that you can have 4 concurrent clients on each GPU -# (assuming you have enough CPUs) -python sim.py --num_cpus=2 --num_gpus=0.25 -``` - -### Run with Flower Next (preview) - -Ensure you have activated your environment, then execute the command below. All `ClientApp` instances will run on CPU but the `ServerApp` will run on the GPU if one is available. Note that this is the case because the `Simulation Engine` only exposes certain resources to the `ClientApp` (based on the `client_resources` in `--backend-config`). - -```bash -# Run with the default backend-config. -# `--server-app` points to the `server` object in the sim.py file in this example. 
-# `--client-app` points to the `client` object in the sim.py file in this example. -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 -``` - -You can change the default resources assigned to each `ClientApp` by means of the `--backend-config` argument: - -```bash -# Tells the VCE to reserve 2x CPUs and 25% of available VRAM for each ClientApp -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 \ - --backend-config='{"client_resources": {"num_cpus":2, "num_gpus":0.25}}' -``` - -Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. diff --git a/examples/simulation-pytorch/pyproject.toml b/examples/simulation-pytorch/pyproject.toml deleted file mode 100644 index 5978c17f2c60..000000000000 --- a/examples/simulation-pytorch/pyproject.toml +++ /dev/null @@ -1,19 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "simulation-pytorch" -version = "0.1.0" -description = "Federated Learning Simulation with Flower and PyTorch" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "2.1.1" -torchvision = "0.16.1" - -[tool.poetry.group.dev.dependencies] -ipykernel = "^6.27.0" diff --git a/examples/simulation-pytorch/requirements.txt b/examples/simulation-pytorch/requirements.txt deleted file mode 100644 index 4dbecab3e546..000000000000 --- a/examples/simulation-pytorch/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -torch==2.1.1 -torchvision==0.16.1 -flwr-datasets[vision]>=0.0.2, <1.0.0 \ No newline at end of file diff --git a/examples/simulation-pytorch/sim.ipynb b/examples/simulation-pytorch/sim.ipynb deleted file mode 100644 index d225069cb444..000000000000 --- a/examples/simulation-pytorch/sim.ipynb +++ /dev/null @@ -1,629 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Environment Setup\n", - "\n", - "To start working with Flower, very little is required once you have activated your Python environment (e.g. via `conda`, `virtualenv`, `pyenv`, etc). If you are running this code on Colab, there is really nothing to do except to install Flower and other dependencies. The steps below have been verified to run in Colab.\n", - "\n", - "## Installing Flower\n", - "\n", - "You can install flower very conveniently from `pip`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# depending on your shell, you might need to add `\\` before `[` and `]`.\n", - "!pip install -q flwr[simulation]\n", - "!pip install flwr_datasets[vision]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will be using the _simulation_ mode in Flower, which allows you to run a large number of clients without the overheads of manually managing devices. This is achieved via the [Virtual Client Engine](https://flower.ai/docs/framework/how-to-run-simulations.html) in Flower. With simulation, you can dynamically scale your experiments whether you run the code on your laptop, a machine with a single GPU, a server with multiple GPUs os even on a cluster with multiple servers. 
The `Virtual Client Engine` handles everything transparently and it allows you to specify how many resources (e.g. CPU cores, GPU VRAM) should be assigned to each virtual client." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "Flower is agnostic to your choice of ML Framework. Flower works with `PyTorch`, `Tensorflow`, `NumPy`, `🤗 Transformers`, `MXNet`, `JAX`, `scikit-learn`, `fastai`, `Pandas`. Flower also supports all major platforms: `iOS`, `Android` and plain `C++`. You can find a _quickstart-_ example for each of the above in the [Flower Repository](https://github.com/adap/flower/tree/main/examples) inside the `examples/` directory.\n", - "\n", - "In this tutorial we are going to use PyTorch, it comes pre-installed in your Collab runtime so there is no need to installed it again. If you wouuld like to install another version, you can still do that in the same way other packages are installed via `!pip`" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are going to install some other dependencies you are likely familiar with. Let's install `maplotlib` to plot our results at the end." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "58b7af77-609f-4118-bd5b-5629a4b5a296" - }, - "outputs": [], - "source": [ - "!pip install matplotlib" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Preparing the experiment\n", - "\n", - "This tutorial is not so much about novel architectural designs so we keep things simple and make use of a typical CNN that is adequate for the MNIST image classification task.\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "from torch.utils.data import DataLoader\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self, num_classes: int) -> None:\n", - " super(Net, self).__init__()\n", - " self.conv1 = nn.Conv2d(1, 6, 5)\n", - " self.pool = nn.MaxPool2d(2, 2)\n", - " self.conv2 = nn.Conv2d(6, 16, 5)\n", - " self.fc1 = nn.Linear(16 * 4 * 4, 120)\n", - " self.fc2 = nn.Linear(120, 84)\n", - " self.fc3 = nn.Linear(84, num_classes)\n", - "\n", - " def forward(self, x: torch.Tensor) -> torch.Tensor:\n", - " x = self.pool(F.relu(self.conv1(x)))\n", - " x = self.pool(F.relu(self.conv2(x)))\n", - " x = x.view(-1, 16 * 4 * 4)\n", - " x = F.relu(self.fc1(x))\n", - " x = F.relu(self.fc2(x))\n", - " x = self.fc3(x)\n", - " return x" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We'll be training the model in a Federated setting. In order to do that, we need to define two functions:\n", - "\n", - "* `train()` that will train the model given a dataloader.\n", - "* `test()` that will be used to evaluate the performance of the model on held-out data, e.g., a training set." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def train(net, trainloader, optim, epochs, device: str):\n", - " \"\"\"Train the network on the training set.\"\"\"\n", - " criterion = torch.nn.CrossEntropyLoss()\n", - " net.train()\n", - " for _ in range(epochs):\n", - " for batch in trainloader:\n", - " images, labels = batch[\"image\"].to(device), batch[\"label\"].to(device)\n", - " optim.zero_grad()\n", - " loss = criterion(net(images), labels)\n", - " loss.backward()\n", - " optim.step()\n", - "\n", - "\n", - "def test(net, testloader, device: str):\n", - " \"\"\"Validate the network on the entire test set.\"\"\"\n", - " criterion = torch.nn.CrossEntropyLoss()\n", - " correct, loss = 0, 0.0\n", - " net.eval()\n", - " with torch.no_grad():\n", - " for data in testloader:\n", - " images, labels = data[\"image\"].to(device), data[\"label\"].to(device)\n", - " outputs = net(images)\n", - " loss += criterion(outputs, labels).item()\n", - " _, predicted = torch.max(outputs.data, 1)\n", - " correct += (predicted == labels).sum().item()\n", - " accuracy = correct / len(testloader.dataset)\n", - " return loss, accuracy" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The code we have written so far is not specific to Federated Learning. Then, what are the key differences between Federated Learning and Centralised Training? If you could only pick you, probably you'd say:\n", - "* Federated Learning is distributed -- the model is trained on-device by the participating clients.\n", - "* Data remains private and is owned by a specific _client_ -- the data is never sent to the central server.\n", - "\n", - "The are several more differences. But the above two are the main ones to always consider and that are common to all flavours of Federated Learning (e.g. _cross-device_ or _cross-silo_). The remaining of this tutorial is going to focus in transforming the code we have written so far for the centralised setting and construct a Federated Learning pipeline using Flower and PyTorch.\n", - "\n", - "Let's begin! 🚀" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## One Client, One Data Partition\n", - "\n", - "To start designing a Federated Learning pipeline we need to meet one of the key properties in FL: each client has its own data partition. To accomplish this with the MNIST dataset, we are going to generate N random partitions, where N is the total number of clients in our FL system.\n", - "\n", - "We can use [Flower Datasets](https://flower.ai/docs/datasets/) to effortlessly obtain an off-the-shelf partitioned dataset or partition one that isn't pre-partitioned. Let's choose MNIST." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from datasets import Dataset\n", - "from flwr_datasets import FederatedDataset\n", - "from datasets.utils.logging import disable_progress_bar\n", - "\n", - "# Let's set a simulation involving a total of 100 clients\n", - "NUM_CLIENTS = 100\n", - "\n", - "# Download MNIST dataset and partition the \"train\" partition (so one can be assigned to each client)\n", - "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", - "# Let's keep the test set as is, and use it to evaluate the global model on the server\n", - "centralized_testset = mnist_fds.load_split(\"test\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create a function that returns a set of transforms to apply to our images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torchvision.transforms import ToTensor, Normalize, Compose\n", - "\n", - "\n", - "def apply_transforms(batch):\n", - " \"\"\"Get transformation for MNIST dataset\"\"\"\n", - "\n", - " # transformation to convert images to tensors and apply normalization\n", - " transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n", - " batch[\"image\"] = [transforms(img) for img in batch[\"image\"]]\n", - " return batch" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's next define how our FL clients will behave.\n", - "\n", - "## Defining a Flower Client\n", - "\n", - "You can think of a client in FL as an entity that owns some data and trains a model using this data. The caveat is that the model is being trained _collaboratively_ in Federation by multiple clients (sometimes up to hundreds of thousands) and, in most instances of FL, is sent by a central server.\n", - "\n", - "A Flower Client is a simple Python class with four distinct methods:\n", - "\n", - "* `fit()`: With this method, the client does on-device training for a number of epochs using its own data. At the end, the resulting model is sent back to the server for aggregation.\n", - "\n", - "* `evaluate()`: With this method, the server can evaluate the performance of the global model on the local validation set of a client. This can be used for instance when there is no centralised dataset on the server for validation/test. Also, this method can be use to asses the degree of personalisation of the model being federated.\n", - "\n", - "* `set_parameters()`: This method takes the parameters sent by the server and uses them to initialise the parameters of the local model that is ML framework specific (e.g. TF, Pytorch, etc).\n", - "\n", - "* `get_parameters()`: It extract the parameters from the local model and transforms them into a list of NumPy arrays. This ML framework-agnostic representation of the model will be sent to the server.\n", - "\n", - "Let's start by importing Flower!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import flwr as fl" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's defice our Flower Client class:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from collections import OrderedDict\n", - "from typing import Dict, List, Tuple\n", - "\n", - "from flwr.common import NDArrays, Scalar\n", - "\n", - "\n", - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, trainloader, valloader) -> None:\n", - " super().__init__()\n", - "\n", - " self.trainloader = trainloader\n", - " self.valloader = valloader\n", - " self.model = Net(num_classes=10)\n", - " # Determine device\n", - " self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - " self.model.to(self.device) # send model to device\n", - "\n", - " def set_parameters(self, parameters):\n", - " \"\"\"With the model parameters received from the server,\n", - " overwrite the uninitialise model in this class with them.\"\"\"\n", - "\n", - " params_dict = zip(self.model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " # now replace the parameters\n", - " self.model.load_state_dict(state_dict, strict=True)\n", - "\n", - " def get_parameters(self, config: Dict[str, Scalar]):\n", - " \"\"\"Extract all model parameters and conver them to a list of\n", - " NumPy arryas. The server doesn't work with PyTorch/TF/etc.\"\"\"\n", - " return [val.cpu().numpy() for _, val in self.model.state_dict().items()]\n", - "\n", - " def fit(self, parameters, config):\n", - " \"\"\"This method train the model using the parameters sent by the\n", - " server on the dataset of this client. At then end, the parameters\n", - " of the locally trained model are communicated back to the server\"\"\"\n", - "\n", - " # copy parameters sent by the server into client's local model\n", - " self.set_parameters(parameters)\n", - "\n", - " # read from config\n", - " lr, epochs = config[\"lr\"], config[\"epochs\"]\n", - "\n", - " # Define the optimizer\n", - " optim = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=0.9)\n", - "\n", - " # do local training\n", - " train(self.model, self.trainloader, optim, epochs=epochs, device=self.device)\n", - "\n", - " # return the model parameters to the server as well as extra info (number of training examples in this case)\n", - " return self.get_parameters({}), len(self.trainloader), {}\n", - "\n", - " def evaluate(self, parameters: NDArrays, config: Dict[str, Scalar]):\n", - " \"\"\"Evaluate the model sent by the server on this client's\n", - " local validation set. Then return performance metrics.\"\"\"\n", - "\n", - " self.set_parameters(parameters)\n", - " loss, accuracy = test(self.model, self.valloader, device=self.device)\n", - " # send statistics back to the server\n", - " return float(loss), len(self.valloader), {\"accuracy\": accuracy}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Spend a few minutes to inspect the `FlowerClient` class above. 
Please ask questions if there is something unclear !\n", - "\n", - "Then keen-eyed among you might have realised that if we were to fuse the client's `fit()` and `evaluate()` methods, we'll end up with essentially the same as in the `run_centralised()` function we used in the Centralised Training part of this tutorial. And it is true!! In Federated Learning, the way clients perform local training makes use of the same principles as more traditional centralised setup. The key difference is that the dataset now is much smaller and it's never _\"seen\"_ by the entity running the FL workload (i.e. the central server).\n", - "\n", - "\n", - "Talking about the central server... we should define what strategy we want to make use of so the updated models sent from the clients back to the server at the end of the `fit()` method are aggregate.\n", - "\n", - "\n", - "## Choosing a Flower Strategy\n", - "\n", - "\n", - "A strategy sits at the core of the Federated Learning experiment. It is involved in all stages of a FL pipeline: sampling clients; sending the _global model_ to the clients so they can do `fit()`; receive the updated models from the clients and **aggregate** these to construct a new _global model_; define and execute global or federated evaluation; and more.\n", - "\n", - "Flower comes with [many strategies built-in](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy) and more to be available in the next release (`1.5` already!). For this tutorial, let's use what is arguable the most popular strategy out there: `FedAvg`.\n", - "\n", - "The way `FedAvg` works is simple but performs surprisingly well in practice. It is therefore one good strategy to start your experimentation. `FedAvg`, as its name implies, derives a new version of the _global model_ by taking the average of all the models sent by clients participating in the round. You can read all the details [in the paper](https://arxiv.org/abs/1602.05629).\n", - "\n", - "Let's see how we can define `FedAvg` using Flower. We use one of the callbacks called `evaluate_fn` so we can easily evaluate the state of the global model using a small centralised testset. Note this functionality is user-defined since it requires a choice in terms of ML-framework. (if you recall, Flower is framework agnostic).\n", - "\n", - "> This being said, centralised evaluation of the global model is only possible if there exists a centralised dataset that somewhat follows a similar distribution as the data that's spread across clients. In some cases having such centralised dataset for validation is not possible, so the only solution is to federate the evaluation of the _global model_. This is the default behaviour in Flower. If you don't specify teh `evaluate_fn` argument in your strategy, then, centralised global evaluation won't be performed." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_evaluate_fn(centralized_testset: Dataset):\n", - " \"\"\"This is a function that returns a function. The returned\n", - " function (i.e. 
`evaluate_fn`) will be executed by the strategy\n", - " at the end of each round to evaluate the stat of the global\n", - " model.\"\"\"\n", - "\n", - " def evaluate_fn(server_round: int, parameters, config):\n", - " \"\"\"This function is executed by the strategy it will instantiate\n", - " a model and replace its parameters with those from the global model.\n", - " The, the model will be evaluate on the test set (recall this is the\n", - " whole MNIST test set).\"\"\"\n", - "\n", - " model = Net(num_classes=10)\n", - "\n", - " # Determine device\n", - " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - " model.to(device) # send model to device\n", - "\n", - " # set parameters to the model\n", - " params_dict = zip(model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " model.load_state_dict(state_dict, strict=True)\n", - "\n", - " # Apply transform to dataset\n", - " testset = centralized_testset.with_transform(apply_transforms)\n", - "\n", - " testloader = DataLoader(testset, batch_size=50)\n", - " # call test\n", - " loss, accuracy = test(model, testloader, device)\n", - " return loss, {\"accuracy\": accuracy}\n", - "\n", - " return evaluate_fn" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We could now define a strategy just as shown (commented) above. Instead, let's see how additional (but entirely optional) functionality can be easily added to our strategy. We are going to define two additional auxiliary functions to: (1) be able to configure how clients do local training; and (2) define a function to aggregate the metrics that clients return after running their `evaluate` methods:\n", - "\n", - "1. `fit_config()`. This is a function that will be executed inside the strategy when configuring a new `fit` round. This function is relatively simple and only requires as input argument the round at which the FL experiment is at. In this example we simply return a Python dictionary to specify the number of epochs and learning rate each client should made use of inside their `fit()` methods. A more versatile implementation would add more hyperparameters (e.g. the learning rate) and adjust them as the FL process advances (e.g. reducing the learning rate in later FL rounds).\n", - "2. `weighted_average()`: This is an optional function to pass to the strategy. It will be executed after an evaluation round (i.e. when client run `evaluate()`) and will aggregate the metrics clients return. In this example, we use this function to compute the weighted average accuracy of clients doing `evaluate()`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flwr.common import Metrics\n", - "\n", - "\n", - "def fit_config(server_round: int) -> Dict[str, Scalar]:\n", - " \"\"\"Return a configuration with static batch size and (local) epochs.\"\"\"\n", - " config = {\n", - " \"epochs\": 1, # Number of local epochs done by clients\n", - " \"lr\": 0.01, # Learning rate to use by clients during fit()\n", - " }\n", - " return config\n", - "\n", - "\n", - "def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n", - " \"\"\"Aggregation function for (federated) evaluation metrics, i.e. 
those returned by\n", - " the client's evaluate() method.\"\"\"\n", - " # Multiply accuracy of each client by number of examples used\n", - " accuracies = [num_examples * m[\"accuracy\"] for num_examples, m in metrics]\n", - " examples = [num_examples for num_examples, _ in metrics]\n", - "\n", - " # Aggregate and return custom metric (weighted average)\n", - " return {\"accuracy\": sum(accuracies) / sum(examples)}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can define our strategy:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.1, # Sample 10% of available clients for training\n", - " fraction_evaluate=0.05, # Sample 5% of available clients for evaluation\n", - " on_fit_config_fn=fit_config,\n", - " evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics\n", - " evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "So far we have:\n", - "* created the dataset partitions (one for each client)\n", - "* defined the client class\n", - "* decided on a strategy to use\n", - "\n", - "Now we just need to launch the Flower FL experiment... not so fast! just one final function: let's create another callback that the Simulation Engine will use in order to span VirtualClients. As you can see this is really simple: construct a FlowerClient object, assigning each their own data partition." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torch.utils.data import DataLoader\n", - "\n", - "\n", - "def get_client_fn(dataset: FederatedDataset):\n", - " \"\"\"Return a function to construct a client.\n", - "\n", - " The VirtualClientEngine will execute this function whenever a client is sampled by\n", - " the strategy to participate.\n", - " \"\"\"\n", - "\n", - " def client_fn(cid: str) -> fl.client.Client:\n", - " \"\"\"Construct a FlowerClient with its own dataset partition.\"\"\"\n", - "\n", - " # Let's get the partition corresponding to the i-th client\n", - " client_dataset = dataset.load_partition(int(cid), \"train\")\n", - "\n", - " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", - "\n", - " trainset = client_dataset_splits[\"train\"]\n", - " valset = client_dataset_splits[\"test\"]\n", - "\n", - " # Now we apply the transform to each batch.\n", - " trainloader = DataLoader(\n", - " trainset.with_transform(apply_transforms), batch_size=32, shuffle=True\n", - " )\n", - " valloader = DataLoader(valset.with_transform(apply_transforms), batch_size=32)\n", - "\n", - " # Create and return client\n", - " return FlowerClient(trainloader, valloader).to_client()\n", - "\n", - " return client_fn\n", - "\n", - "\n", - "client_fn_callback = get_client_fn(mnist_fds)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we are ready to launch the FL experiment using Flower simulation:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "9ad8dcea-8004-4c6e-a025-e168da636c88" - }, - "outputs": [], - "source": [ - "# With a dictionary, you tell Flower's VirtualClientEngine that 
each\n", - "# client needs exclusive access to these many resources in order to run\n", - "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", - "\n", - "# Let's disable tqdm progress bar in the main thread (used by the server)\n", - "disable_progress_bar()\n", - "\n", - "history = fl.simulation.start_simulation(\n", - " client_fn=client_fn_callback, # a callback to construct a client\n", - " num_clients=NUM_CLIENTS, # total number of clients in the experiment\n", - " config=fl.server.ServerConfig(num_rounds=10), # let's run for 10 rounds\n", - " strategy=strategy, # the strategy that will orchestrate the whole FL pipeline\n", - " client_resources=client_resources,\n", - " actor_kwargs={\n", - " \"on_actor_init_fn\": disable_progress_bar # disable tqdm on each actor/process spawning virtual clients\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Doing 10 rounds should take less than 2 minutes on a CPU-only Colab instance <-- Flower Simulation is fast! 🚀\n", - "\n", - "You can then use the resturned `History` object to either save the results to disk or do some visualisation (or both of course, or neither if you like chaos). Below you can see how you can plot the centralised accuracy obtainined at the end of each round (including at the very beginning of the experiment) for the _global model_. This is want the function `evaluate_fn()` that we passed to the strategy reports." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 508 - }, - "outputId": "d8eab106-cee9-4266-9082-0944882cdba8" - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "print(f\"{history.metrics_centralized = }\")\n", - "\n", - "global_accuracy_centralised = history.metrics_centralized[\"accuracy\"]\n", - "round = [data[0] for data in global_accuracy_centralised]\n", - "acc = [100.0 * data[1] for data in global_accuracy_centralised]\n", - "plt.plot(round, acc)\n", - "plt.grid()\n", - "plt.ylabel(\"Accuracy (%)\")\n", - "plt.xlabel(\"Round\")\n", - "plt.title(\"MNIST - IID - 100 clients with 10 clients per round\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Congratulations! 
With that, you built a Flower client, customized it's instantiation through the `client_fn`, customized the server-side execution through a `FedAvg` strategy configured for this workload, and started a simulation with 100 clients (each holding their own individual partition of the MNIST dataset).\n", - "\n", - "Next, you can continue to explore more advanced Flower topics:\n", - "\n", - "- Deploy server and clients on different machines using `start_server` and `start_client`\n", - "- Customize the server-side execution through custom strategies\n", - "- Customize the client-side execution through `config` dictionaries\n", - "\n", - "Get all resources you need!\n", - "\n", - "* **[DOCS]** Our complete documenation: https://flower.ai/docs/\n", - "* **[Examples]** All Flower examples: https://flower.ai/docs/examples/\n", - "* **[VIDEO]** Our Youtube channel: https://www.youtube.com/@flowerlabs\n", - "\n", - "Don't forget to join our Slack channel: https://flower.ai/join-slack/\n" - ] - } - ], - "metadata": { - "colab": { - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/simulation-pytorch/sim.py b/examples/simulation-pytorch/sim.py deleted file mode 100644 index a435db6d7724..000000000000 --- a/examples/simulation-pytorch/sim.py +++ /dev/null @@ -1,225 +0,0 @@ -import argparse -from collections import OrderedDict -from typing import Dict, List, Tuple - -import flwr as fl -import torch -from datasets import Dataset -from datasets.utils.logging import disable_progress_bar -from flwr.common import Metrics -from flwr.common.typing import Scalar -from flwr_datasets import FederatedDataset -from torch.utils.data import DataLoader - -from utils import Net, apply_transforms, test, train - -parser = argparse.ArgumentParser(description="Flower Simulation with PyTorch") - -parser.add_argument( - "--num_cpus", - type=int, - default=1, - help="Number of CPUs to assign to a virtual client", -) -parser.add_argument( - "--num_gpus", - type=float, - default=0.0, - help="Ratio of GPU memory to assign to a virtual client", -) - -NUM_CLIENTS = 100 -NUM_ROUNDS = 10 - - -# Flower client, adapted from Pytorch quickstart example -class FlowerClient(fl.client.NumPyClient): - def __init__(self, trainset, valset): - self.trainset = trainset - self.valset = valset - - # Instantiate model - self.model = Net() - - # Determine device - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model.to(self.device) # send model to device - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def fit(self, parameters, config): - set_params(self.model, parameters) - - # Read from config - batch, epochs = config["batch_size"], config["epochs"] - - # Construct dataloader - trainloader = DataLoader(self.trainset, batch_size=batch, shuffle=True) - - # Define optimizer - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9) - # Train - train(self.model, trainloader, optimizer, epochs=epochs, device=self.device) - - # Return local model and statistics - return self.get_parameters({}), len(trainloader.dataset), {} - - def evaluate(self, parameters, config): - set_params(self.model, parameters) - - # Construct dataloader - valloader = DataLoader(self.valset, batch_size=64) - - # Evaluate - loss, accuracy = test(self.model, valloader, device=self.device) 
- - # Return statistics - return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} - - -def get_client_fn(dataset: FederatedDataset): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. - """ - - def client_fn(context) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - - # Let's get the partition corresponding to the i-th client - client_dataset = dataset.load_partition( - int(context.node_config["partition-id"]), "train" - ) - - # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) - - trainset = client_dataset_splits["train"] - valset = client_dataset_splits["test"] - - # Now we apply the transform to each batch. - trainset = trainset.with_transform(apply_transforms) - valset = valset.with_transform(apply_transforms) - - # Create and return client - return FlowerClient(trainset, valset).to_client() - - return client_fn - - -def fit_config(server_round: int) -> Dict[str, Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config = { - "epochs": 1, # Number of local epochs done by clients - "batch_size": 32, # Batch size to use by clients during fit() - } - return config - - -def set_params(model: torch.nn.ModuleList, params: List[fl.common.NDArrays]): - """Set model weights from a list of NumPy ndarrays.""" - params_dict = zip(model.state_dict().keys(), params) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Aggregation function for (federated) evaluation metrics, i.e. 
those returned by - the client's evaluate() method.""" - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -def get_evaluate_fn( - centralized_testset: Dataset, -): - """Return an evaluation function for centralized evaluation.""" - - def evaluate( - server_round: int, parameters: fl.common.NDArrays, config: Dict[str, Scalar] - ): - """Use the entire CIFAR-10 test set for evaluation.""" - - # Determine device - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - model = Net() - set_params(model, parameters) - model.to(device) - - # Apply transform to dataset - testset = centralized_testset.with_transform(apply_transforms) - - # Disable tqdm for dataset preprocessing - disable_progress_bar() - - testloader = DataLoader(testset, batch_size=50) - loss, accuracy = test(model, testloader, device=device) - - return loss, {"accuracy": accuracy} - - return evaluate - - -# Download MNIST dataset and partition it -mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) -centralized_testset = mnist_fds.load_split("test") - -from flwr.server import ServerAppComponents - - -def server_fn(context): - # Configure the strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, # Sample 10% of available clients for training - fraction_evaluate=0.05, # Sample 5% of available clients for evaluation - min_available_clients=10, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=weighted_average, # Aggregate federated metrics - evaluate_fn=get_evaluate_fn(centralized_testset), # Global evaluation function - ) - return ServerAppComponents( - strategy=strategy, config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS) - ) - - -# ClientApp for Flower-Next -client = fl.client.ClientApp( - client_fn=get_client_fn(mnist_fds), -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp(server_fn=server_fn) - - -def main(): - # Parse input arguments - args = parser.parse_args() - - # Resources to be assigned to each virtual client - client_resources = { - "num_cpus": args.num_cpus, - "num_gpus": args.num_gpus, - } - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn(mnist_fds), - num_clients=NUM_CLIENTS, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, - actor_kwargs={ - "on_actor_init_fn": disable_progress_bar # disable tqdm on each actor/process spawning virtual clients - }, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/simulation-pytorch/utils.py b/examples/simulation-pytorch/utils.py deleted file mode 100644 index 702e9886615e..000000000000 --- a/examples/simulation-pytorch/utils.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision.transforms import Compose, Normalize, ToTensor - - -# transformation to convert images to tensors and apply normalization -def apply_transforms(batch): - transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - batch["image"] = [transforms(img) for img in batch["image"]] - return batch - - -# Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz') -class Net(nn.Module): - def __init__(self, num_classes: int = 10) -> None: - super(Net, self).__init__() - 
self.conv1 = nn.Conv2d(1, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 4 * 4, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, num_classes) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 4 * 4) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - -# borrowed from Pytorch quickstart example -def train(net, trainloader, optim, epochs, device: str): - """Train the network on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - net.train() - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["image"].to(device), batch["label"].to(device) - optim.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optim.step() - - -# borrowed from Pytorch quickstart example -def test(net, testloader, device: str): - """Validate the network on the entire test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for data in testloader: - images, labels = data["image"].to(device), data["label"].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy diff --git a/examples/simulation-tensorflow/README.md b/examples/simulation-tensorflow/README.md deleted file mode 100644 index 047cb4379659..000000000000 --- a/examples/simulation-tensorflow/README.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -tags: [basic, vision, fds, simulation] -dataset: [MNIST] -framework: [tensorflow, Keras] ---- - -# Flower Simulation example using TensorFlow/Keras - -This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. - -## Running the example (via Jupyter Notebook) - -Run the example on Google Colab: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/examples/simulation-tensorflow/sim.ipynb) - -Alternatively, you can run `sim.ipynb` locally or in any other Jupyter environment. - -## Running the example - -Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/simulation-tensorflow . && rm -rf flower && cd simulation-tensorflow -``` - -This will create a new directory called `simulation-tensorflow` containing the following files: - -``` --- README.md <- Your're reading this right now --- sim.ipynb <- Example notebook --- sim.py <- Example code --- pyproject.toml <- Example dependencies --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -### Run with `start_simulation()` - -Ensure you have activated your environment then: - -```bash -# and then run the example -python sim.py -``` - -You can adjust the CPU/GPU resources you assign to each of your virtual clients. By default, your clients will only use 2xCPU core. For example: - -```bash -# Will assign 2xCPUs to each client -python sim.py --num_cpus=2 - -# Will assign 2xCPUs and 25% of the GPU's VRAM to each client -# This means that you can have 4 concurrent clients on each GPU -# (assuming you have enough CPUs) -python sim.py --num_cpus=2 --num_gpus=0.25 -``` - -Because TensorFlow by default maps all the available VRAM, we need to [enable GPU memory growth](https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth), see how it is done in the example (`sim.py`) for both the "main" process (where the server/strategy runs) and for the clients (using the `actor_kwargs`) - -### Run with Flower Next (preview) - -Ensure you have activated your environment, then execute the command below. All `ClientApp` instances will run on CPU but the `ServerApp` will run on the GPU if one is available. Note that this is the case because the `Simulation Engine` only exposes certain resources to the `ClientApp` (based on the `client_resources` in `--backend-config`). For TensorFlow simulations, it is desirable to make use of TF's [memory growth](https://www.tensorflow.org/api_docs/python/tf/config/experimental/set_memory_growth) feature. You can enable that easily with the `--enable-tf-gpu-growth` flag. - -```bash -# Run with the default backend-config. -# `--server-app` points to the `server` object in the sim.py file in this example. -# `--client-app` points to the `client` object in the sim.py file in this example. -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 --enable-tf-gpu-growth -``` - -You can change the default resources assigned to each `ClientApp` using the `--backend-config` argument. - -```bash -# Tells the VCE to reserve 2x CPUs and 25% of available VRAM for each ClientApp -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 \ - --backend-config='{"client_resources": {"num_cpus":2, "num_gpus":0.25}}' --enable-tf-gpu-growth -``` - -Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. 
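As a point of reference, the GPU memory-growth behaviour described in the README above boils down to a couple of standard TensorFlow calls. The sketch below is illustrative only; Flower's `enable_tf_gpu_growth` helper and the `--enable-tf-gpu-growth` flag are assumed to wrap equivalent logic, and the calls must run before TensorFlow initialises any GPU:

```python
import tensorflow as tf

# Allocate GPU memory on demand instead of mapping all available VRAM
# up front. This must run before any GPU has been initialised.
for gpu in tf.config.list_physical_devices("GPU"):
    tf.config.experimental.set_memory_growth(gpu, True)
```

Without something like this, a single simulated client can reserve the entire GPU and starve the other clients sharing it.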
diff --git a/examples/simulation-tensorflow/pyproject.toml b/examples/simulation-tensorflow/pyproject.toml deleted file mode 100644 index ad8cc2032b2d..000000000000 --- a/examples/simulation-tensorflow/pyproject.toml +++ /dev/null @@ -1,16 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "simulation-tensorflow" -version = "0.1.0" -description = "Federated Learning Simulation with Flower and Tensorflow" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -tensorflow = { version = "^2.9.1, !=2.11.1", markers = "platform_machine == 'x86_64'" } -tensorflow-macos = { version = "^2.9.1, !=2.11.1", markers = "sys_platform == 'darwin' and platform_machine == 'arm64'" } diff --git a/examples/simulation-tensorflow/requirements.txt b/examples/simulation-tensorflow/requirements.txt deleted file mode 100644 index bb69a87be1b4..000000000000 --- a/examples/simulation-tensorflow/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" -tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == "x86_64" diff --git a/examples/simulation-tensorflow/sim.ipynb b/examples/simulation-tensorflow/sim.ipynb deleted file mode 100644 index 26b7260b5f1c..000000000000 --- a/examples/simulation-tensorflow/sim.ipynb +++ /dev/null @@ -1,347 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Flower Quickstart (Simulation with TensorFlow/Keras)\n", - "\n", - "Welcome to Flower, a friendly federated learning framework!\n", - "\n", - "In this notebook, we'll simulate a federated learning system with 100 clients. The clients will use TensorFlow/Keras to define model training and evaluation. Let's start by installing Flower (published as `flwr` on PyPI) with the `simulation` extra:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install -q flwr[\"simulation\"] tensorflow\n", - "!pip install -q flwr_datasets[\"vision\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's also install Matplotlib so we can make some plots once the simulation is completed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install matplotlib" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we import the required dependencies. The most important imports are Flower (`flwr`) and TensorFlow:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Dict, List, Tuple\n", - "\n", - "import tensorflow as tf\n", - "\n", - "import flwr as fl\n", - "from flwr.common import Metrics\n", - "from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth\n", - "\n", - "from datasets import Dataset\n", - "from flwr_datasets import FederatedDataset\n", - "\n", - "VERBOSE = 0\n", - "NUM_CLIENTS = 100" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's start by defining the model we want to federated. Since we will be working with MNIST, using a fully connected model is sufficient. You can of course customize this model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_model():\n", - " \"\"\"Constructs a simple model architecture suitable for MNIST.\"\"\"\n", - " model = tf.keras.models.Sequential(\n", - " [\n", - " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", - " tf.keras.layers.Dense(128, activation=\"relu\"),\n", - " tf.keras.layers.Dropout(0.2),\n", - " tf.keras.layers.Dense(10, activation=\"softmax\"),\n", - " ]\n", - " )\n", - " model.compile(\"adam\", \"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n", - " return model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With that out of the way, let's move on to the interesting bits. Federated learning systems consist of a server and multiple clients. In Flower, we create clients by implementing subclasses of `flwr.client.Client` or `flwr.client.NumPyClient`. We use `NumPyClient` in this tutorial because it is easier to implement and requires us to write less boilerplate.\n", - "\n", - "To implement the Flower client, we create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`:\n", - "\n", - "- `get_parameters`: Return the current local model parameters\n", - "- `fit`: Receive model parameters from the server, train the model parameters on the local data, and return the (updated) model parameters to the server \n", - "- `evaluate`: Received model parameters from the server, evaluate the model parameters on the local data, and return the evaluation result to the server\n", - "\n", - "We mentioned that our clients will use TensorFlow/Keras for the model training and evaluation. Keras models provide methods that make the implementation straightforward: we can update the local model with server-provides parameters through `model.set_weights`, we can train/evaluate the model through `fit/evaluate`, and we can get the updated model parameters through `model.get_weights`.\n", - "\n", - "Let's see a simple implementation:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, trainset, valset) -> None:\n", - " # Create model\n", - " self.model = get_model()\n", - " self.trainset = trainset\n", - " self.valset = valset\n", - "\n", - " def get_parameters(self, config):\n", - " return self.model.get_weights()\n", - "\n", - " def fit(self, parameters, config):\n", - " self.model.set_weights(parameters)\n", - " self.model.fit(self.trainset, epochs=1, verbose=VERBOSE)\n", - " return self.model.get_weights(), len(self.trainset), {}\n", - "\n", - " def evaluate(self, parameters, config):\n", - " self.model.set_weights(parameters)\n", - " loss, acc = self.model.evaluate(self.valset, verbose=VERBOSE)\n", - " return loss, len(self.valset), {\"accuracy\": acc}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our class `FlowerClient` defines how local training/evaluation will be performed and allows Flower to call the local training/evaluation through `fit` and `evaluate`. Each instance of `FlowerClient` represents a *single client* in our federated learning system. Federated learning systems have multiple clients (otherwise, there's not much to federate, is there?), so each client will be represented by its own instance of `FlowerClient`. If we have, for example, three clients in our workload, we'd have three instances of `FlowerClient`. 
Flower calls `FlowerClient.fit` on the respective instance when the server selects a particular client for training (and `FlowerClient.evaluate` for evaluation).\n", - "\n", - "In this notebook, we want to simulate a federated learning system with 100 clients on a single machine. This means that the server and all 100 clients will live on a single machine and share resources such as CPU, GPU, and memory. Having 100 clients would mean having 100 instances of `FlowerClient` in memory. Doing this on a single machine can quickly exhaust the available memory resources, even if only a subset of these clients participates in a single round of federated learning.\n", - "\n", - "In addition to the regular capabilities where server and clients run on multiple machines, Flower, therefore, provides special simulation capabilities that create `FlowerClient` instances only when they are actually necessary for training or evaluation. To enable the Flower framework to create clients when necessary, we need to implement a function called `client_fn` that creates a `FlowerClient` instance on demand. Flower calls `client_fn` whenever it needs an instance of one particular client to call `fit` or `evaluate` (those instances are usually discarded after use). Clients are identified by a client ID, or short `cid`. The `cid` can be used, for example, to load different local data partitions for each client" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now define four auxiliary functions for this example (note the last two are entirely optional):\n", - "* `get_client_fn()`: Is a function that returns another function. The returned `client_fn` will be executed by Flower's VirtualClientEngine each time a new _virtual_ client (i.e. a client that is simulated in a Python process) needs to be spawn. When are virtual clients spawned? Each time the strategy samples them to do either `fit()` (i.e. train the global model on the local data of a particular client) or `evaluate()` (i.e. evaluate the global model on the validation set of a given client).\n", - "\n", - "* `weighted_average()`: This is an optional function to pass to the strategy. It will be executed after an evaluation round (i.e. when client run `evaluate()`) and will aggregate the metrics clients return. In this example, we use this function to compute the weighted average accuracy of clients doing `evaluate()`.\n", - "\n", - "* `get_evaluate_fn()`: This is again a function that returns another function. The returned function will be executed by the strategy at the end of a `fit()` round and after a new global model has been obtained after aggregation. This is an optional argument for Flower strategies. In this example, we use the whole MNIST test set to perform this server-side evaluation." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_client_fn(dataset: FederatedDataset):\n", - " \"\"\"Return a function to construct a client.\n", - "\n", - " The VirtualClientEngine will execute this function whenever a client is sampled by\n", - " the strategy to participate.\n", - " \"\"\"\n", - "\n", - " def client_fn(cid: str) -> fl.client.Client:\n", - " \"\"\"Construct a FlowerClient with its own dataset partition.\"\"\"\n", - "\n", - " # Extract partition for client with id = cid\n", - " client_dataset = dataset.load_partition(int(cid), \"train\")\n", - "\n", - " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", - "\n", - " trainset = client_dataset_splits[\"train\"].to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=32\n", - " )\n", - " valset = client_dataset_splits[\"test\"].to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=64\n", - " )\n", - "\n", - " # Create and return client\n", - " return FlowerClient(trainset, valset).to_client()\n", - "\n", - " return client_fn\n", - "\n", - "\n", - "def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n", - " \"\"\"Aggregation function for (federated) evaluation metrics, i.e. those returned by\n", - " the client's evaluate() method.\"\"\"\n", - " # Multiply accuracy of each client by number of examples used\n", - " accuracies = [num_examples * m[\"accuracy\"] for num_examples, m in metrics]\n", - " examples = [num_examples for num_examples, _ in metrics]\n", - "\n", - " # Aggregate and return custom metric (weighted average)\n", - " return {\"accuracy\": sum(accuracies) / sum(examples)}\n", - "\n", - "\n", - "def get_evaluate_fn(testset: Dataset):\n", - " \"\"\"Return an evaluation function for server-side (i.e. centralised) evaluation.\"\"\"\n", - "\n", - " # The `evaluate` function will be called after every round by the strategy\n", - " def evaluate(\n", - " server_round: int,\n", - " parameters: fl.common.NDArrays,\n", - " config: Dict[str, fl.common.Scalar],\n", - " ):\n", - " model = get_model() # Construct the model\n", - " model.set_weights(parameters) # Update model with the latest parameters\n", - " loss, accuracy = model.evaluate(testset, verbose=VERBOSE)\n", - " return loss, {\"accuracy\": accuracy}\n", - "\n", - " return evaluate" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now have `FlowerClient` which defines client-side training and evaluation, and `client_fn`, which allows Flower to create `FlowerClient` instances whenever it needs to call `fit` or `evaluate` on one particular client. The last step is to start the actual simulation using `flwr.simulation.start_simulation`. \n", - "\n", - "The function `start_simulation` accepts a number of arguments, amongst them the `client_fn` used to create `FlowerClient` instances, the number of clients to simulate `num_clients`, the number of rounds `num_rounds`, and the strategy. The strategy encapsulates the federated learning approach/algorithm, for example, *Federated Averaging* (FedAvg).\n", - "\n", - "Flower comes with a number of built-in strategies, but we can also use our own strategy implementations to customize nearly all aspects of the federated learning approach. For this example, we use the built-in `FedAvg` implementation and customize it using a few basic parameters. 
The last step is the actual call to `start_simulation` which - you guessed it - actually starts the simulation.\n", - "\n", - "We can use [Flower Datasets](https://flower.ai/docs/datasets/) to effortlessly obtain an off-the-shelf partitioned dataset or partition one that isn't pre-partitioned. Let's choose MNIST." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Enable GPU growth in your main process\n", - "enable_tf_gpu_growth()\n", - "\n", - "# Download MNIST dataset and partition it\n", - "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", - "# Get the whole test set for centralised evaluation\n", - "centralized_testset = mnist_fds.load_split(\"test\").to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=64\n", - ")\n", - "\n", - "\n", - "# Create FedAvg strategy\n", - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.1, # Sample 10% of available clients for training\n", - " fraction_evaluate=0.05, # Sample 5% of available clients for evaluation\n", - " min_fit_clients=10, # Never sample less than 10 clients for training\n", - " min_evaluate_clients=5, # Never sample less than 5 clients for evaluation\n", - " min_available_clients=int(\n", - " NUM_CLIENTS * 0.75\n", - " ), # Wait until at least 75 clients are available\n", - " evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics\n", - " evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function\n", - ")\n", - "\n", - "# With a dictionary, you tell Flower's VirtualClientEngine that each\n", - "# client needs exclusive access to these many resources in order to run\n", - "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", - "\n", - "# Start simulation\n", - "history = fl.simulation.start_simulation(\n", - " client_fn=get_client_fn(mnist_fds),\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=10),\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", - " actor_kwargs={\n", - " \"on_actor_init_fn\": enable_tf_gpu_growth # Enable GPU growth upon actor init.\n", - " },\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can then use the resturned History object to either save the results to disk or do some visualisation (or both of course, or neither if you like chaos). Below you can see how you can plot the centralised accuracy obtainined at the end of each round (including at the very beginning of the experiment) for the global model. This is want the function `evaluate_fn()` that we passed to the strategy reports." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "print(f\"{history.metrics_centralized = }\")\n", - "\n", - "global_accuracy_centralised = history.metrics_centralized[\"accuracy\"]\n", - "round = [data[0] for data in global_accuracy_centralised]\n", - "acc = [100.0 * data[1] for data in global_accuracy_centralised]\n", - "plt.plot(round, acc)\n", - "plt.grid()\n", - "plt.ylabel(\"Accuracy (%)\")\n", - "plt.xlabel(\"Round\")\n", - "plt.title(\"MNIST - IID - 100 clients with 10 clients per round\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Congratulations! 
With that, you built a Flower client, customized it's instantiation through the `client_fn`, customized the server-side execution through a `FedAvg` strategy configured for this workload, and started a simulation with 100 clients (each holding their own individual partition of the MNIST dataset).\n", - "\n", - "Next, you can continue to explore more advanced Flower topics:\n", - "\n", - "- Deploy server and clients on different machines using `start_server` and `start_client`\n", - "- Customize the server-side execution through custom strategies\n", - "- Customize the client-side execution through `config` dictionaries\n", - "\n", - "Get all resources you need!\n", - "\n", - "* **[DOCS]** Our complete documenation: https://flower.ai/docs/\n", - "* **[Examples]** All Flower examples: https://flower.ai/docs/examples/\n", - "* **[VIDEO]** Our Youtube channel: https://www.youtube.com/@flowerlabs\n", - "\n", - "Don't forget to join our Slack channel: https://flower.ai/join-slack/" - ] - } - ], - "metadata": { - "colab": { - "name": "flower.ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/simulation-tensorflow/sim.py b/examples/simulation-tensorflow/sim.py deleted file mode 100644 index 1ae2db41ab4b..000000000000 --- a/examples/simulation-tensorflow/sim.py +++ /dev/null @@ -1,186 +0,0 @@ -import argparse -import os -from typing import Dict, List, Tuple - -import flwr as fl -import tensorflow as tf -from datasets import Dataset -from flwr.common import Metrics -from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth -from flwr_datasets import FederatedDataset - -# Make TensorFlow logs less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -parser = argparse.ArgumentParser(description="Flower Simulation with Tensorflow/Keras") - -parser.add_argument( - "--num_cpus", - type=int, - default=1, - help="Number of CPUs to assign to a virtual client", -) -parser.add_argument( - "--num_gpus", - type=float, - default=0.0, - help="Ratio of GPU memory to assign to a virtual client", -) - -NUM_CLIENTS = 100 -NUM_ROUNDS = 10 -VERBOSE = 0 - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self, trainset, valset) -> None: - # Create model - self.model = get_model() - self.trainset = trainset - self.valset = valset - - def get_parameters(self, config): - return self.model.get_weights() - - def fit(self, parameters, config): - self.model.set_weights(parameters) - self.model.fit(self.trainset, epochs=1, verbose=VERBOSE) - return self.model.get_weights(), len(self.trainset), {} - - def evaluate(self, parameters, config): - self.model.set_weights(parameters) - loss, acc = self.model.evaluate(self.valset, verbose=VERBOSE) - return loss, len(self.valset), {"accuracy": acc} - - -def get_model(): - """Constructs a simple model architecture suitable for MNIST.""" - model = tf.keras.models.Sequential( - [ - tf.keras.layers.Flatten(input_shape=(28, 28)), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dropout(0.2), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - return model - - -def get_client_fn(dataset: FederatedDataset): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. 
- """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - - # Extract partition for client with id = cid - client_dataset = dataset.load_partition(int(cid), "train") - - # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) - - trainset = client_dataset_splits["train"].to_tf_dataset( - columns="image", label_cols="label", batch_size=32 - ) - valset = client_dataset_splits["test"].to_tf_dataset( - columns="image", label_cols="label", batch_size=64 - ) - - # Create and return client - return FlowerClient(trainset, valset).to_client() - - return client_fn - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Aggregation function for (federated) evaluation metrics. - - It ill aggregate those metrics returned by the client's evaluate() method. - """ - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -def get_evaluate_fn(testset: Dataset): - """Return an evaluation function for server-side (i.e. centralised) evaluation.""" - - # The `evaluate` function will be called after every round by the strategy - def evaluate( - server_round: int, - parameters: fl.common.NDArrays, - config: Dict[str, fl.common.Scalar], - ): - model = get_model() # Construct the model - model.set_weights(parameters) # Update model with the latest parameters - loss, accuracy = model.evaluate(testset, verbose=VERBOSE) - return loss, {"accuracy": accuracy} - - return evaluate - - -# Download MNIST dataset and partition it -mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) -# Get the whole test set for centralised evaluation -centralized_testset = mnist_fds.load_split("test").to_tf_dataset( - columns="image", label_cols="label", batch_size=64 -) - -# Create FedAvg strategy -strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, # Sample 10% of available clients for training - fraction_evaluate=0.05, # Sample 5% of available clients for evaluation - min_fit_clients=10, # Never sample less than 10 clients for training - evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics - evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function -) - - -# ClientApp for Flower-Next -client = fl.client.ClientApp( - client_fn=get_client_fn(mnist_fds), -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, -) - - -def main() -> None: - # Parse input arguments - args = parser.parse_args() - - # With a dictionary, you tell Flower's VirtualClientEngine that each - # client needs exclusive access to these many resources in order to run - client_resources = { - "num_cpus": args.num_cpus, - "num_gpus": args.num_gpus, - } - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn(mnist_fds), - num_clients=NUM_CLIENTS, - config=fl.server.ServerConfig(NUM_ROUNDS), - strategy=strategy, - client_resources=client_resources, - actor_kwargs={ - "on_actor_init_fn": enable_tf_gpu_growth # Enable GPU growth upon actor init - # does nothing if `num_gpus` in client_resources is 0.0 - }, - ) - - -if __name__ == "__main__": - # Enable GPU 
growth in your main process - enable_tf_gpu_growth() - main() diff --git a/examples/sklearn-logreg-mnist/README.md b/examples/sklearn-logreg-mnist/README.md index b56dbfc5dd3a..7c75e2ecfb85 100644 --- a/examples/sklearn-logreg-mnist/README.md +++ b/examples/sklearn-logreg-mnist/README.md @@ -55,7 +55,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,fraction-fit=0.25 +flwr run . --run-config "num-server-rounds=5 fraction-fit=0.25" ``` > \[!TIP\] diff --git a/examples/sklearn-logreg-mnist/pyproject.toml b/examples/sklearn-logreg-mnist/pyproject.toml index be1e4810b312..937f05e35eda 100644 --- a/examples/sklearn-logreg-mnist/pyproject.toml +++ b/examples/sklearn-logreg-mnist/pyproject.toml @@ -12,7 +12,7 @@ authors = [ { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets[vision]>=0.3.0", "numpy<2.0.0", "scikit-learn~=1.2.2", diff --git a/examples/tensorflow-privacy/README.md b/examples/tensorflow-privacy/README.md index 8156f92f60c9..af85865346bb 100644 --- a/examples/tensorflow-privacy/README.md +++ b/examples/tensorflow-privacy/README.md @@ -1,66 +1,64 @@ --- -tags: [basic, vision, fds, privacy, dp] +tags: [DP, DP-SGD, basic, vision, fds, privacy] dataset: [MNIST] framework: [tensorflow] --- # Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine -In this example, we demonstrate how to train a model with sample-level differential privacy (DP) using Flower. We employ TensorFlow and integrate the tensorflow-privacy Engine to achieve sample-level differential privacy. This setup ensures robust privacy guarantees during the client training phase. +In this example, we demonstrate how to train a model with sample-level differential privacy (DP) using Flower. We employ TensorFlow and integrate the tensorflow-privacy engine to achieve sample-level differential privacy. This setup ensures robust privacy guarantees during the client training phase. For more information about DP in Flower please refer to the [tutorial](https://flower.ai/docs/framework/how-to-use-differential-privacy.html). For additional information about tensorflow-privacy, visit the official [website](https://www.tensorflow.org/responsible_ai/privacy/guide). -## Environments Setup +## Set up the project -Start by cloning the example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project + +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/tensorflow-privacy . && rm -rf flower && cd tensorflow-privacy +git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/tensorflow-privacy . \ + && rm -rf flower \ + && cd tensorflow-privacy ``` This will create a new directory called `tensorflow-privacy` containing the following files: ```shell --- pyproject.toml --- client.py --- server.py --- README.md +tensorflow-privacy +├── tf_privacy +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing dependencies - -Project dependencies are defined in `pyproject.toml`. 
Install them with: - -```shell -pip install . -``` +> \[!NOTE\] +> Please note that, in its current state, users cannot set `NodeConfig` for simulated `ClientApp`s. For this reason, the hyperparameter `noise_multiplier` is set in the `client_fn` method based on a condition check on `partition_id`. This will be modified in a future version of Flower to allow users to set `NodeConfig` for simulated `ClientApp`s. -## Run Flower with tensorflow-privacy and TensorFlow +### Install dependencies and project -### 1. Start the long-running Flower server (SuperLink) +Install the dependencies defined in `pyproject.toml` as well as the `tf_privacy` package. -```bash -flower-superlink --insecure +```shell +# From a new Python environment, run: +pip install -e . ``` -### 2. Start the long-running Flower clients (SuperNodes) +## Run the project -Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```bash -flower-client-app client:appA --insecure -``` +### Run with the Simulation Engine ```bash -flower-client-app client:appB --insecure +flwr run . ``` -tensorflow-privacy hyperparameters can be passed for each client in `ClientApp` instantiation (in `client.py`). In this example, `noise_multiplier=1.5` and `noise_multiplier=1` are used for the first and second client respectively. -### 3. Run the Flower App - -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-server-app server:app --insecure +flwr run . 
--run-config "l2-norm-clip=1.5 num-server-rounds=5" ``` diff --git a/examples/tensorflow-privacy/client.py b/examples/tensorflow-privacy/client.py deleted file mode 100644 index 85ed8a3d4245..000000000000 --- a/examples/tensorflow-privacy/client.py +++ /dev/null @@ -1,150 +0,0 @@ -import argparse -import os - -import tensorflow as tf -import tensorflow_privacy -from flwr.client import ClientApp, NumPyClient -from flwr_datasets import FederatedDataset -from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy_lib import ( - compute_dp_sgd_privacy_statement, -) - -# Make TensorFlow log less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - - -def load_data(partition_id, batch_size): - fds = FederatedDataset(dataset="mnist", partitioners={"train": 2}) - partition = fds.load_partition(partition_id, "train") - partition.set_format("numpy") - - # Divide data on each node: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2, seed=42) - x_train, y_train = partition["train"]["image"] / 255.0, partition["train"]["label"] - x_test, y_test = partition["test"]["image"] / 255.0, partition["test"]["label"] - - # Adjust the size of the training dataset to make it evenly divisible by the batch size - remainder = len(x_train) % batch_size - if remainder != 0: - x_train = x_train[:-remainder] - y_train = y_train[:-remainder] - - return (x_train, y_train), (x_test, y_test) - - -class FlowerClient(NumPyClient): - def __init__( - self, - model, - train_data, - test_data, - l2_norm_clip, - noise_multiplier, - num_microbatches, - learning_rate, - batch_size, - ) -> None: - super().__init__() - self.model = model - self.x_train, self.y_train = train_data - self.x_test, self.y_test = test_data - self.noise_multiplier = noise_multiplier - self.l2_norm_clip = l2_norm_clip - self.num_microbatches = num_microbatches - self.learning_rate = learning_rate - self.batch_size = batch_size - if self.batch_size % self.num_microbatches != 0: - raise ValueError( - f"Batch size {self.batch_size} is not divisible by the number of microbatches {self.num_microbatches}" - ) - - self.optimizer = tensorflow_privacy.DPKerasSGDOptimizer( - l2_norm_clip=l2_norm_clip, - noise_multiplier=noise_multiplier, - num_microbatches=num_microbatches, - learning_rate=learning_rate, - ) - loss = tf.keras.losses.SparseCategoricalCrossentropy( - reduction=tf.losses.Reduction.NONE - ) - self.model.compile(optimizer=self.optimizer, loss=loss, metrics=["accuracy"]) - - def get_parameters(self, config): - return self.model.get_weights() - - def fit(self, parameters, config): - self.model.set_weights(parameters) - - self.model.fit( - self.x_train, - self.y_train, - epochs=1, - batch_size=self.batch_size, - ) - - compute_dp_sgd_privacy_statement( - number_of_examples=self.x_train.shape[0], - batch_size=self.batch_size, - num_epochs=1, - noise_multiplier=self.noise_multiplier, - delta=1e-5, - ) - - return self.model.get_weights(), len(self.x_train), {} - - def evaluate(self, parameters, config): - self.model.set_weights(parameters) - self.model.compile( - optimizer=self.optimizer, - loss="sparse_categorical_crossentropy", - metrics=["accuracy"], - ) - loss, accuracy = self.model.evaluate(self.x_test, self.y_test) - return loss, len(self.x_test), {"accuracy": accuracy} - - -def client_fn_parameterized( - partition_id, - noise_multiplier, - l2_norm_clip=1.0, - num_microbatches=64, - learning_rate=0.01, - batch_size=64, -): - def client_fn(cid: str): - model = tf.keras.Sequential( - [ - tf.keras.layers.InputLayer(input_shape=(28, 
28, 1)), - tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), - tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), - tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), - tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), - tf.keras.layers.Flatten(), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - train_data, test_data = load_data( - partition_id=partition_id, batch_size=batch_size - ) - return FlowerClient( - model, - train_data, - test_data, - noise_multiplier, - l2_norm_clip, - num_microbatches, - learning_rate, - batch_size, - ).to_client() - - return client_fn - - -appA = ClientApp( - client_fn=client_fn_parameterized(partition_id=0, noise_multiplier=1.0), -) - -appB = ClientApp( - client_fn=client_fn_parameterized(partition_id=1, noise_multiplier=1.5), -) diff --git a/examples/tensorflow-privacy/pyproject.toml b/examples/tensorflow-privacy/pyproject.toml index 884ba3b5f07b..48248cb31195 100644 --- a/examples/tensorflow-privacy/pyproject.toml +++ b/examples/tensorflow-privacy/pyproject.toml @@ -4,14 +4,11 @@ build-backend = "hatchling.build" [project] name = "tensorflow-privacy-fl" -version = "0.1.0" +version = "1.0.0" description = "Sample-level Differential Privacy with Tensorflow-Privacy in Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.1.0,<1.0.0", + "flwr[simulation]>=1.11.0", + "flwr-datasets[vision]>=0.3.0", "tensorflow-estimator~=2.4", "tensorflow-probability~=0.22.0", "tensorflow>=2.4.0,<=2.15.0", @@ -20,3 +17,23 @@ dependencies = [ [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "tf_privacy.server_app:app" +clientapp = "tf_privacy.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +l2-norm-clip = 1.0 +num-microbatches = 64 +learning-rate = 0.01 +batch-size = 64 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/tensorflow-privacy/server.py b/examples/tensorflow-privacy/server.py deleted file mode 100644 index 5b2ac6a3c4df..000000000000 --- a/examples/tensorflow-privacy/server.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List, Tuple - -from flwr.common import Metrics -from flwr.server import ServerApp, ServerConfig -from flwr.server.strategy import FedAvg - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - return {"accuracy": sum(accuracies) / sum(examples)} - - -strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -config = ServerConfig(num_rounds=3) - -app = ServerApp( - config=config, - strategy=strategy, -) diff --git a/examples/tensorflow-privacy/tf_privacy/__init__.py b/examples/tensorflow-privacy/tf_privacy/__init__.py new file mode 100644 index 000000000000..252b33cdd1c5 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/__init__.py @@ -0,0 +1 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" diff --git a/examples/tensorflow-privacy/tf_privacy/client_app.py b/examples/tensorflow-privacy/tf_privacy/client_app.py new file mode 100644 index 000000000000..977d98bbbe43 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/client_app.py 
@@ -0,0 +1,93 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +import os + +import tensorflow as tf +import tensorflow_privacy +from flwr.client import ClientApp, NumPyClient +from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy_lib import ( + compute_dp_sgd_privacy_statement, +) +from flwr.common import Context + +from tf_privacy.task import load_data, load_model +import numpy as np + + +# Make TensorFlow log less verbose +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + + +class FlowerClient(NumPyClient): + def __init__( + self, + train_data, + test_data, + noise_multiplier, + run_config, + ) -> None: + super().__init__() + self.model = load_model() + self.x_train, self.y_train = train_data + self.x_train = np.expand_dims(self.x_train, axis=-1) + self.x_test, self.y_test = test_data + self.x_test = np.expand_dims(self.x_test, axis=-1) + self.noise_multiplier = noise_multiplier + self.run_config = run_config + if self.run_config["batch-size"] % self.run_config["num-microbatches"] != 0: + raise ValueError( + f"Batch size {self.run_config['batch-size']} is not divisible by the number of microbatches {self.run_config['num-microbatches']}" + ) + + self.optimizer = tensorflow_privacy.DPKerasSGDOptimizer( + l2_norm_clip=self.run_config["l2-norm-clip"], + noise_multiplier=self.noise_multiplier, + num_microbatches=self.run_config["num-microbatches"], + learning_rate=self.run_config["learning-rate"], + ) + loss = tf.keras.losses.SparseCategoricalCrossentropy( + reduction=tf.losses.Reduction.NONE + ) + self.model.compile(optimizer=self.optimizer, loss=loss, metrics=["accuracy"]) + + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, + self.y_train, + epochs=1, + batch_size=self.run_config["batch-size"], + ) + + dp_statement = compute_dp_sgd_privacy_statement( + number_of_examples=self.x_train.shape[0], + batch_size=self.run_config["batch-size"], + num_epochs=1, + noise_multiplier=self.noise_multiplier, + delta=1e-5, + ) + print(dp_statement) + + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test) + return loss, len(self.x_test), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + run_config = context.run_config + noise_multiplier = 1.0 if partition_id % 2 == 0 else 1.5 + + train_data, test_data = load_data( + partition_id=partition_id, + num_partitions=context.node_config["num-partitions"], + batch_size=context.run_config["batch-size"], + ) + + return FlowerClient(train_data, test_data, noise_multiplier, run_config).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/tensorflow-privacy/tf_privacy/server_app.py b/examples/tensorflow-privacy/tf_privacy/server_app.py new file mode 100644 index 000000000000..5348492a3ac4 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/server_app.py @@ -0,0 +1,31 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +from typing import List, Tuple + +from flwr.common import Metrics +from flwr.server import ServerApp, ServerConfig, ServerAppComponents +from flwr.server.strategy import FedAvg +from flwr.common import Context, ndarrays_to_parameters +from .task import load_model + + +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + accuracies 
= [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + parameters = ndarrays_to_parameters(load_model().get_weights()) + strategy = FedAvg( + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/tensorflow-privacy/tf_privacy/task.py b/examples/tensorflow-privacy/tf_privacy/task.py new file mode 100644 index 000000000000..7bbf2a3e9c09 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/task.py @@ -0,0 +1,52 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +import tensorflow as tf + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +fds = None # Cache FederatedDataset + + +def load_model(): + model = tf.keras.Sequential( + [ + tf.keras.layers.InputLayer(input_shape=(28, 28, 1)), + tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), + tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(128, activation="relu"), + tf.keras.layers.Dense(10, activation="softmax"), + ] + ) + + return model + + +def load_data(partition_id: int, num_partitions: int, batch_size): + + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + + partition = fds.load_partition(partition_id) + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2, seed=42) + x_train, y_train = partition["train"]["image"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["image"] / 255.0, partition["test"]["label"] + + # Adjust the size of the training dataset to make it evenly divisible by the batch size + remainder = len(x_train) % batch_size + if remainder != 0: + x_train = x_train[:-remainder] + y_train = y_train[:-remainder] + + return (x_train, y_train), (x_test, y_test) diff --git a/examples/vertical-fl/.gitignore b/examples/vertical-fl/.gitignore index 64af4779185a..5d2a2d133ae3 100644 --- a/examples/vertical-fl/.gitignore +++ b/examples/vertical-fl/.gitignore @@ -1,2 +1 @@ -_static/results -!_static/data/train.csv +!data/train.csv diff --git a/examples/vertical-fl/README.md b/examples/vertical-fl/README.md index ab5d2210d8d5..a9f6fc383060 100644 --- a/examples/vertical-fl/README.md +++ b/examples/vertical-fl/README.md @@ -1,11 +1,10 @@ --- -title: Vertical FL Flower Example tags: [vertical, tabular, advanced] dataset: [Titanic] framework: [torch, pandas, scikit-learn] --- -# Vertical Federated Learning example +# Vertical Federated Learning with Flower This example will showcase how you can perform Vertical Federated Learning using Flower. 
We'll be using the [Titanic dataset](https://www.kaggle.com/competitions/titanic/data) @@ -14,89 +13,6 @@ more details below, but the main idea of Vertical Federated Learning is that each client is holding different feature sets of the same dataset and that the server is holding the labels of this dataset. -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you -can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/vertical-fl . && rm -rf _tmp && cd vertical-fl -``` - -This will create a new directory called `vertical-fl` containing the -following files: - -```shell --- pyproject.toml --- requirements.txt --- _static/data/train.csv --- client.py --- plot.py --- simulation.py --- strategy.py --- task.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in -`pyproject.toml` and `requirements.txt`. We recommend -[Poetry](https://python-poetry.org/docs/) to install those dependencies and -manage your virtual environment ([Poetry -installation](https://python-poetry.org/docs/#installation)) or -[pip](https://pip.pypa.io/en/latest/development/), but feel free to use a -different way of installing dependencies and managing virtual environments if -you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual -environment. To verify that everything works correctly you can run the following -command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according -to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -## Usage - -Once everything is installed, you can just run: - -```shell -poetry run python3 simulation.py -``` - -for `poetry`, otherwise just run: - -```shell -python3 simulation.py -``` - -This will start the Vertical FL training for 1000 rounds with 3 clients. -Eventhough the number of rounds is quite high, this should only take a few -seconds to run as the model is very small. - -## Explanations - -### Vertical FL vs Horizontal FL - | | Horizontal Federated Learning (HFL or just FL) | Vertical Federated Learning (VFL) | | --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | Data Distribution | Clients have different data instances but share the same feature space. Think of different hospitals having different patients' data (samples) but recording the same types of information (features). | Each client holds different features for the same instances. Imagine different institutions holding various tests or measurements for the same group of patients. | @@ -106,412 +22,64 @@ seconds to run as the model is very small. 
| HFL | VFL | | :-----------------------------: | :-----------------------------: | -| ![HFL diagram](_static/hfl.jpg) | ![VFL diagram](_static/vfl.jpg) | +| ![HFL diagram](_static/hfl.png) | ![VFL diagram](_static/vfl.png) | Those diagrams illustrate HFL vs VFL using a simplified version of what we will be building in this example. Note that on the VFL side, the server holds the labels (the `Survived` column) and will be the only one capable of performing evaluation. -### Data - -#### About - -The Titanic Survival dataset is a popular dataset used to predict passenger survival on -the Titanic based on various features. - -You can see an exhaustive list of the features over on [Kaggle](https://www.kaggle.com/competitions/titanic/data). - -The data is stored as a CSV file in `_static/data/train.csv`, it contains 892 -samples with labels. - -#### Preprocessing - -In `task.py`, you'll find the preprocessing functions we'll apply to our data: - -- Passengers are grouped by age: 'Child' for 10 years and under, - 'Adult' for ages between 11 and 40, and 'Elderly' for those over 40. If the age - isn't listed, we'll label it as 'Unknown'. - - ```python - def _bin_age(age_series): - bins = [-np.inf, 10, 40, np.inf] - labels = ["Child", "Adult", "Elderly"] - return ( - pd.cut(age_series, bins=bins, labels=labels, right=True) - .astype(str) - .replace("nan", "Unknown") - ) - ``` - -- We pull out titles from passengers' names to help our model - understand social status and family roles, simplifying rare titles into a single - 'Rare' category and converting any French titles to their English equivalents. - - ```python - def _extract_title(name_series): - titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) - rare_titles = { - "Lady", - "Countess", - "Capt", - "Col", - "Don", - "Dr", - "Major", - "Rev", - "Sir", - "Jonkheer", - "Dona", - } - titles = titles.replace(list(rare_titles), "Rare") - titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) - return titles - ``` - -- The first letter of each cabin number is used to identify the - cabin area, with any missing entries marked as 'Unknown'. This could provide - insight into the passenger's location on the ship. - -- We remove features like 'PassengerId', 'Name', and - 'Ticket' that won't be necessary for our model's predictions. - -- Lastly, we convert categorical data points such as 'Sex', - 'Pclass', 'Embarked', 'Title', 'Cabin', and the binned 'Age' into One-Hot - encodings. 
- - ```python - def _create_features(df): - # Convert 'Age' to numeric, coercing errors to NaN - df["Age"] = pd.to_numeric(df["Age"], errors="coerce") - df["Age"] = _bin_age(df["Age"]) - df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") - df["Title"] = _extract_title(df["Name"]) - df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) - all_keywords = set(df.columns) - df = pd.get_dummies( - df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] - ) - return df, all_keywords - ``` - -#### Partitioning - -In `task.py`, we also partition our data for our 3 clients to mirror real-life -collaborations where different organizations hold different feature sets: - -```python -def _partition_data(df, all_keywords): - partitions = [] - keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] - keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) - - for keywords in keywords_sets: - partitions.append( - df[ - list( - { - col - for col in df.columns - for kw in keywords - if kw in col or "Survived" in col - } - ) - ] - ) - - return partitions -``` - -Client 1: This client looks at family connections and accommodations, working -with features like the number of parents and children each passenger had on -board ('Parch'), the cabin number ('Cabin'), and the ticket class ('Pclass'). - -Client 2: Here, the focus is on personal attributes. This client examines the -passengers' gender ('Sex') and societal roles as indicated by their titles -('Title'). - -Client 3: The final client handles the rest of the data that the first two don't -see. This includes the remaining features that give a broader view of the -passengers' information. - -Each client is going to train their models on their own unique data without any -idea of the passengers' survival outcomes, which we're trying to predict. - -Once all clients have done their part, we combine their insights to form a -comprehensive understanding, just as if different organizations were pooling -their knowledge while keeping their data private. This is the essence of -Vertical Federated Learning: separate but together, each contributing to a -collective intelligence without sharing sensitive information. - -Note that our final data processing function looks like that: - -```python -def get_partitions_and_label(): - df = pd.read_csv("_static/data/train.csv") - processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() - processed_df, all_keywords = _create_features(processed_df) - raw_partitions = _partition_data(processed_df, all_keywords) - - partitions = [] - for partition in raw_partitions: - partitions.append(partition.drop("Survived", axis=1)) - return partitions, processed_df["Survived"].values -``` - -This returns the 3 partitions for our clients and the labels for our server. - -### Models - -#### Clients - -Each client's model is a neural network designed to operate on a distinct subset -of features held by a client. In this example we will use simple linear -regression models. - -```python -class ClientModel(nn.Module): - def __init__(self, input_size): - super(ClientModel, self).__init__() - self.fc = nn.Linear(input_size, 4) - - def forward(self, x): - return self.fc(x) -``` - -The `input_size` corresponds to the number of features each client has, and this -model maps those features to a 4-dimensional latent space. The outputs are -essentially feature embeddings that capture the patterns within each client's -data slice. 
These embeddings are then ready to be sent to the server for further -processing. - -#### Server - -The server's model acts as the central aggregator in the VFL system. It's also a -neural network but with a slightly different architecture tailored to its role -in aggregating the client models' outputs. - -```python -class ServerModel(nn.Module): - def __init__(self): - super(ServerModel, self).__init__() - self.fc = nn.Linear(12, 1) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = self.fc(x) - return self.sigmoid(x) -``` - -It comprises a single linear layer that accepts the concatenated outputs from -all client models as its input. The number of inputs to this layer equals the -total number of outputs from the client models (3 x 4 = 12). After processing -these inputs, the linear layer's output is passed through a sigmoid activation -function (`nn.Sigmoid()`), which maps the result to a `(0, 1)` range, providing -a probability score indicative of the likelihood of survival. - -### Strategy - -The strategy we will write to perform the aggregation will inherit from `FedAvg` -and set the following additional attributes: - -```python -self.model = ServerModel(12) -self.initial_parameters = ndarrays_to_parameters( - [val.cpu().numpy() for _, val in self.model.state_dict().items()] -) -self.optimizer = optim.SGD(self.model.parameters(), lr=0.01) -self.criterion = nn.BCELoss() -self.label = torch.tensor(labels).float().unsqueeze(1) -``` - -With `labels` given as an argument to the strategy. +## Set up the project -We then redefine the `aggregate_fit` method: +### Clone the project -```python -def aggregate_fit( - self, - rnd, - results, - failures, -): - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} +Start by cloning the example project: - # Convert results - embedding_results = [ - torch.from_numpy(parameters_to_ndarrays(fit_res.parameters)[0]) - for _, fit_res in results - ] - embeddings_aggregated = torch.cat(embedding_results, dim=1) - embedding_server = embeddings_aggregated.detach().requires_grad_() - output = self.model(embedding_server) - loss = self.criterion(output, self.label) - loss.backward() - - self.optimizer.step() - self.optimizer.zero_grad() - - grads = embedding_server.grad.split([4, 4, 4], dim=1) - np_grads = [grad.numpy() for grad in grads] - parameters_aggregated = ndarrays_to_parameters(np_grads) - - with torch.no_grad(): - correct = 0 - output = self.model(embedding_server) - predicted = (output > 0.5).float() - - correct += (predicted == self.label).sum().item() - - accuracy = correct / len(self.label) * 100 - - metrics_aggregated = {"accuracy": accuracy} - - return parameters_aggregated, metrics_aggregated +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/vertical-fl . \ + && rm -rf _tmp \ + && cd vertical-fl ``` -This is where all the magic happens. We first convert the `np.array`s that we -received from our clients to `tensor`s, before concatenating the 3 embeddings -together. This means that we go from 3 tensors of size `(892, 4)` to 1 tensor of -size `(892, 12)`. The combined embeddings are fed through the server model to -get the prediction output. The loss between the predicted output and the actual -labels is calculated. Backward propagation is then performed to calculate the -gradients, which are used to update the server model's parameters. 
-
-The optimizer updates the server model's parameters based on the calculated
-gradients, and the gradients are reset to zero to prepare for the next round of
-aggregation.
-
-The gradients from the server model's embedding layer are then split according
-to the size of the output from each client model (assuming equal size for
-simplicity here), ready to be sent back to the respective client models.
-
-Finally, with no gradient calculation needed, the model's predictions are
-compared to the true labels to calculate the accuracy of the model after the
-update.
-
-Note that this `aggregate_fit` function returns gradients instead of trained
-weights. This is because, in this setting, sharing gradients allows each
-participant to benefit from the collective feedback gathered from the entire
-pool of data without the need to align their different feature spaces (trained
-weights are directly tied to specific features of the dataset but not gradients,
-which are just a measure of the sensitivity of the loss function to changes in
-the model's parameters). This shared feedback, encapsulated in the gradients,
-guides each participant's model to adjust and improve, achieving optimization
-not just based on its own data but also leveraging insights from the entire
-network's data.
-
-We do not need to return parameters here because updates are completed locally
-in VFL. But the server should still send the gradients back to all clients to
-let them continue the back prop and update their local model. In Flower, the
-parameters returned by `aggregate_fit` will be stored and sent to
-`Client.evaluate` via `configure_fit`. So we take advantage of this and return
-our gradients in `aggregate_fit` so that they'll be sent to `Client.evaluate` as
-`parameters`. That's also why we can obtain gradients from the `parameters`
-argument in `Client.evaluate` (see next section).
-
-The last thing we have to do is to redefine the `aggregate_evaluate` function to
-disable distributed evaluation (as the clients do not hold any labels to test
-their local models).
+This will create a new directory called `vertical-fl` with the following structure:
 
-```python
-def aggregate_evaluate(
-    self,
-    rnd,
-    results,
-    failures,
-):
-    return None, {}
+```shell
+vertical-fl
+├── vertical_fl
+│   ├── __init__.py
+│   ├── client_app.py   # Defines your ClientApp
+│   ├── server_app.py   # Defines your ServerApp
+│   ├── strategy.py     # Defines your Strategy
+│   └── task.py         # Defines your model, training and data loading
+├── pyproject.toml      # Project metadata like dependencies and configs
+├── data/train.csv
+└── README.md
 ```
 
-### Client class and function
-
-Our `FlowerClient` class is going to be quite straight forward.
-
-```python
-class FlowerClient(fl.client.NumPyClient):
-    def __init__(self, cid, data):
-        self.cid = cid
-        self.train = torch.tensor(StandardScaler().fit_transform(data)).float()
-        self.model = ClientModel(input_size=self.train.shape[1])
-        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01)
-        self.embedding = self.model(self.train)
+### Install dependencies and project
 
-    def get_parameters(self, config):
-        pass
+Install the dependencies defined in `pyproject.toml` as well as the `vertical-fl` package.
- def fit(self, parameters, config): - self.embedding = self.model(self.train) - return [self.embedding.detach().numpy()], 1, {} - - def evaluate(self, parameters, config): - self.model.zero_grad() - self.embedding.backward(torch.from_numpy(parameters[int(self.cid)])) - self.optimizer.step() - return None +```bash +pip install -e . ``` -After defining our model and data attributes (respectively `self.model` and -`self.train`), we define our `fit` function as such: the `self.model(self.train)` -performs a forward pass using the client's local training data (`self.train`). -This generates the embeddings (feature representations) for the data. To conform -with the return type of the `fit` function, we need to return a list of -`np.array`s (hence the conversion), the number of samples, which won't be used -on the server side, so we just return 1, and then an empty dict. - -For the `evaluate` function, we perform our model's backward pass using the -gradients sent by the server and then update our local model's parameters based -on those new gradients. Note that the `loss` and `num_examples` we return in our -evaluate function are bogus, as they won't be used on the server side. +## Run the project -The `client_fn` we will use in our `start_simulation` function to generate our 3 -clients will be very basic: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```python3 -partitions, label = get_partitions_and_label() +### Run with the Simulation Engine -def client_fn(cid): - return FlowerClient(cid, partitions[int(cid)]).to_client() +```bash +flwr run . ``` -We pass a `client_id` and its corresponding partition to each client. - -### Evaluation +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -Please note that we do not perform distributed evaluation. This is because only -the server holds some labels to compare the results to. This is why the only -evaluation we perform is on the server side. - -In this example, we use the `FlowerClient` `evaluate` function for -backpropagation instead of using it for evaluation. We do this because we know -that the `evaluate` function of the clients will be called after the fit -function. This allows us to aggregate our models in `aggregate_fit` and then -send them back to the clients using this `evaluate` function and perform the -backpropagation. This is not done for evaluation, hence why we return `None` in -the `aggregate_evaluate` function of the strategy. - -### Starting the simulation - -Putting everything together, to start our simulation we use the following -function: - -```python -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=3, - config=fl.server.ServerConfig(num_rounds=1000), - strategy=Strategy(label), -) +```bash +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` -As mentioned before, we train for 1000 rounds but it should still last only -a few seconds. - -Note that we store the results of the simulation into `hist`, this will allow us -to use the `plot.py` file to plot the accuracy as a function of the number of -rounds. 
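Editor's aside (not part of the patch): the `--run-config` overrides shown above reach the application through `context.run_config`, which is how the rewritten `vertical_fl` ServerApp and ClientApp later in this diff read `num-server-rounds` and `learning-rate`. A minimal sketch under that assumption, using only the Flower APIs already imported elsewhere in this diff; defaults come from `[tool.flwr.app.config]` in `pyproject.toml`:

```python
from flwr.common import Context
from flwr.server import ServerApp, ServerAppComponents, ServerConfig
from flwr.server.strategy import FedAvg


def server_fn(context: Context) -> ServerAppComponents:
    # "num-server-rounds=5" on the command line overrides the pyproject.toml default
    num_rounds = context.run_config["num-server-rounds"]
    return ServerAppComponents(strategy=FedAvg(), config=ServerConfig(num_rounds=num_rounds))


app = ServerApp(server_fn=server_fn)
```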
- -## Results - -Here we can observe the results after 1000 rounds: +### Run with the Deployment Engine -![Accuracy plot](_static/vfl-accuracy.png) +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/vertical-fl/_static/hfl.jpg b/examples/vertical-fl/_static/hfl.jpg deleted file mode 100644 index 7fd4c47de2b3..000000000000 Binary files a/examples/vertical-fl/_static/hfl.jpg and /dev/null differ diff --git a/examples/vertical-fl/_static/hfl.png b/examples/vertical-fl/_static/hfl.png new file mode 100644 index 000000000000..3078b927788a Binary files /dev/null and b/examples/vertical-fl/_static/hfl.png differ diff --git a/examples/vertical-fl/_static/vfl-accuracy.png b/examples/vertical-fl/_static/vfl-accuracy.png deleted file mode 100644 index c436b6db0825..000000000000 Binary files a/examples/vertical-fl/_static/vfl-accuracy.png and /dev/null differ diff --git a/examples/vertical-fl/_static/vfl.jpg b/examples/vertical-fl/_static/vfl.jpg deleted file mode 100644 index a7ce7dbfad31..000000000000 Binary files a/examples/vertical-fl/_static/vfl.jpg and /dev/null differ diff --git a/examples/vertical-fl/_static/vfl.png b/examples/vertical-fl/_static/vfl.png new file mode 100644 index 000000000000..89e8db72a952 Binary files /dev/null and b/examples/vertical-fl/_static/vfl.png differ diff --git a/examples/vertical-fl/client.py b/examples/vertical-fl/client.py deleted file mode 100644 index 9f489e70f086..000000000000 --- a/examples/vertical-fl/client.py +++ /dev/null @@ -1,27 +0,0 @@ -import flwr as fl -import torch -from sklearn.preprocessing import StandardScaler - -from task import ClientModel - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid, data): - self.cid = cid - self.train = torch.tensor(StandardScaler().fit_transform(data)).float() - self.model = ClientModel(input_size=self.train.shape[1]) - self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01) - self.embedding = self.model(self.train) - - def get_parameters(self, config): - pass - - def fit(self, parameters, config): - self.embedding = self.model(self.train) - return [self.embedding.detach().numpy()], 1, {} - - def evaluate(self, parameters, config): - self.model.zero_grad() - self.embedding.backward(torch.from_numpy(parameters[int(self.cid)])) - self.optimizer.step() - return 0.0, 1, {} diff --git a/examples/vertical-fl/_static/data/train.csv b/examples/vertical-fl/data/train.csv similarity index 100% rename from examples/vertical-fl/_static/data/train.csv rename to examples/vertical-fl/data/train.csv diff --git a/examples/vertical-fl/plot.py b/examples/vertical-fl/plot.py deleted file mode 100644 index 3dac7c04a3de..000000000000 --- a/examples/vertical-fl/plot.py +++ /dev/null @@ -1,8 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np - -if __name__ == "__main__": - hist = np.load("_static/results/hist.npy", allow_pickle=True).item() - rounds, values = zip(*hist.metrics_distributed_fit["accuracy"]) - plt.plot(np.asarray(rounds), np.asarray(values)) - plt.savefig("_static/results/accuracy.png") diff --git a/examples/vertical-fl/pyproject.toml b/examples/vertical-fl/pyproject.toml index 19dcd0e7a842..9ebc2251c0dd 100644 --- a/examples/vertical-fl/pyproject.toml +++ b/examples/vertical-fl/pyproject.toml @@ -1,18 +1,38 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" 
-[tool.poetry] +[project] name = "vertical-fl" -version = "0.1.0" +version = "1.0.0" description = "PyTorch Vertical FL with Flower" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -torch = "2.1.0" -matplotlib = "3.7.3" -scikit-learn = "1.3.2" -numpy = "1.24.4" -pandas = "2.0.3" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.11.0", + "flwr-datasets>=0.3.0", + "numpy==1.24.4", + "pandas==2.0.3", + "scikit-learn==1.3.2", + "torch==2.1.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "vertical_fl.server_app:app" +clientapp = "vertical_fl.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +learning-rate = 0.1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 3 # Note that this example will require changes to how VFL is implemented + diff --git a/examples/vertical-fl/requirements.txt b/examples/vertical-fl/requirements.txt deleted file mode 100644 index aee341e4c554..000000000000 --- a/examples/vertical-fl/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -torch==2.1.0 -matplotlib==3.7.3 -scikit-learn==1.3.2 -numpy==1.24.4 -pandas==2.0.3 diff --git a/examples/vertical-fl/simulation.py b/examples/vertical-fl/simulation.py deleted file mode 100644 index 1aa1c341d5eb..000000000000 --- a/examples/vertical-fl/simulation.py +++ /dev/null @@ -1,27 +0,0 @@ -from pathlib import Path - -import flwr as fl -import numpy as np - -from client import FlowerClient -from strategy import Strategy -from task import get_partitions_and_label - -partitions, label = get_partitions_and_label() - - -def client_fn(cid): - return FlowerClient(cid, partitions[int(cid)]).to_client() - - -# Start Flower server -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=3, - config=fl.server.ServerConfig(num_rounds=1000), - strategy=Strategy(label), -) - -results_dir = Path("_static/results") -results_dir.mkdir(exist_ok=True) -np.save(str(results_dir / "hist.npy"), hist) diff --git a/examples/vertical-fl/task.py b/examples/vertical-fl/task.py deleted file mode 100644 index 603a051822e9..000000000000 --- a/examples/vertical-fl/task.py +++ /dev/null @@ -1,90 +0,0 @@ -import numpy as np -import pandas as pd -import torch.nn as nn - - -def _bin_age(age_series): - bins = [-np.inf, 10, 40, np.inf] - labels = ["Child", "Adult", "Elderly"] - return ( - pd.cut(age_series, bins=bins, labels=labels, right=True) - .astype(str) - .replace("nan", "Unknown") - ) - - -def _extract_title(name_series): - titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) - rare_titles = { - "Lady", - "Countess", - "Capt", - "Col", - "Don", - "Dr", - "Major", - "Rev", - "Sir", - "Jonkheer", - "Dona", - } - titles = titles.replace(list(rare_titles), "Rare") - titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) - return titles - - -def _create_features(df): - # Convert 'Age' to numeric, coercing errors to NaN - df["Age"] = pd.to_numeric(df["Age"], errors="coerce") - df["Age"] = _bin_age(df["Age"]) - df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") - df["Title"] = _extract_title(df["Name"]) - df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) - all_keywords = set(df.columns) - df = pd.get_dummies( - df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] - ) - 
return df, all_keywords - - -def get_partitions_and_label(): - df = pd.read_csv("_static/data/train.csv") - processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() - processed_df, all_keywords = _create_features(processed_df) - raw_partitions = _partition_data(processed_df, all_keywords) - - partitions = [] - for partition in raw_partitions: - partitions.append(partition.drop("Survived", axis=1)) - return partitions, processed_df["Survived"].values - - -def _partition_data(df, all_keywords): - partitions = [] - keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] - keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) - - for keywords in keywords_sets: - partitions.append( - df[ - list( - { - col - for col in df.columns - for kw in keywords - if kw in col or "Survived" in col - } - ) - ] - ) - - return partitions - - -class ClientModel(nn.Module): - def __init__(self, input_size): - super(ClientModel, self).__init__() - self.fc = nn.Linear(input_size, 4) - - def forward(self, x): - return self.fc(x) diff --git a/examples/vertical-fl/vertical_fl/client_app.py b/examples/vertical-fl/vertical_fl/client_app.py new file mode 100644 index 000000000000..d517480da1d4 --- /dev/null +++ b/examples/vertical-fl/vertical_fl/client_app.py @@ -0,0 +1,41 @@ +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from sklearn.preprocessing import StandardScaler +import torch + +from vertical_fl.task import ClientModel, load_data + + +class FlowerClient(NumPyClient): + def __init__(self, v_split_id, data, lr): + self.v_split_id = v_split_id + self.data = torch.tensor(StandardScaler().fit_transform(data)).float() + self.model = ClientModel(input_size=self.data.shape[1]) + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr) + + def get_parameters(self, config): + pass + + def fit(self, parameters, config): + embedding = self.model(self.data) + return [embedding.detach().numpy()], 1, {} + + def evaluate(self, parameters, config): + self.model.zero_grad() + embedding = self.model(self.data) + embedding.backward(torch.from_numpy(parameters[int(self.v_split_id)])) + self.optimizer.step() + return 0.0, 1, {} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + partition, v_split_id = load_data(partition_id, num_partitions=num_partitions) + lr = context.run_config["learning-rate"] + return FlowerClient(v_split_id, partition, lr).to_client() + + +app = ClientApp( + client_fn=client_fn, +) diff --git a/examples/vertical-fl/vertical_fl/server_app.py b/examples/vertical-fl/vertical_fl/server_app.py new file mode 100644 index 000000000000..95620226b707 --- /dev/null +++ b/examples/vertical-fl/vertical_fl/server_app.py @@ -0,0 +1,25 @@ +from flwr.common import Context +from flwr.server import ServerApp, ServerAppComponents, ServerConfig + +from vertical_fl.strategy import Strategy +from vertical_fl.task import process_dataset + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behaviour.""" + + # Get dataset + processed_df, _ = process_dataset() + + # Define the strategy + strategy = Strategy(processed_df["Survived"].values) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Start Flower server +app = ServerApp(server_fn=server_fn) diff --git 
a/examples/vertical-fl/strategy.py b/examples/vertical-fl/vertical_fl/strategy.py similarity index 66% rename from examples/vertical-fl/strategy.py rename to examples/vertical-fl/vertical_fl/strategy.py index 0744fa83662a..9195416076b0 100644 --- a/examples/vertical-fl/strategy.py +++ b/examples/vertical-fl/vertical_fl/strategy.py @@ -17,37 +17,8 @@ def forward(self, x): class Strategy(fl.server.strategy.FedAvg): - def __init__( - self, - labels, - *, - fraction_fit=1, - fraction_evaluate=1, - min_fit_clients=2, - min_evaluate_clients=2, - min_available_clients=2, - evaluate_fn=None, - on_fit_config_fn=None, - on_evaluate_config_fn=None, - accept_failures=True, - initial_parameters=None, - fit_metrics_aggregation_fn=None, - evaluate_metrics_aggregation_fn=None, - ) -> None: - super().__init__( - fraction_fit=fraction_fit, - fraction_evaluate=fraction_evaluate, - min_fit_clients=min_fit_clients, - min_evaluate_clients=min_evaluate_clients, - min_available_clients=min_available_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - on_evaluate_config_fn=on_evaluate_config_fn, - accept_failures=accept_failures, - initial_parameters=initial_parameters, - fit_metrics_aggregation_fn=fit_metrics_aggregation_fn, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn, - ) + def __init__(self, labels, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) self.model = ServerModel(12) self.initial_parameters = ndarrays_to_parameters( [val.cpu().numpy() for _, val in self.model.state_dict().items()] diff --git a/examples/vertical-fl/vertical_fl/task.py b/examples/vertical-fl/vertical_fl/task.py new file mode 100644 index 000000000000..8e76d9419a8a --- /dev/null +++ b/examples/vertical-fl/vertical_fl/task.py @@ -0,0 +1,139 @@ +from pathlib import Path +from logging import WARN +import torch.nn as nn +import numpy as np +import pandas as pd +import torch.nn as nn +from flwr.common.logger import log + +from datasets import Dataset +from flwr_datasets.partitioner import IidPartitioner + +NUM_VERTICAL_SPLITS = 3 + + +def _bin_age(age_series): + bins = [-np.inf, 10, 40, np.inf] + labels = ["Child", "Adult", "Elderly"] + return ( + pd.cut(age_series, bins=bins, labels=labels, right=True) + .astype(str) + .replace("nan", "Unknown") + ) + + +def _extract_title(name_series): + titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) + rare_titles = { + "Lady", + "Countess", + "Capt", + "Col", + "Don", + "Dr", + "Major", + "Rev", + "Sir", + "Jonkheer", + "Dona", + } + titles = titles.replace(list(rare_titles), "Rare") + titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) + return titles + + +def _create_features(df): + # Convert 'Age' to numeric, coercing errors to NaN + df["Age"] = pd.to_numeric(df["Age"], errors="coerce") + df["Age"] = _bin_age(df["Age"]) + df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") + df["Title"] = _extract_title(df["Name"]) + df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) + all_keywords = set(df.columns) + df = pd.get_dummies( + df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] + ) + return df, all_keywords + + +def process_dataset(): + + df = pd.read_csv(Path(__file__).parents[1] / "data/train.csv") + processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() + return _create_features(processed_df) + + +def load_data(partition_id: int, num_partitions: int): + """Partition the data vertically and then horizontally. 
+
+    We create three sets of features representing three types of nodes participating in
+    the federation.
+
+    [{'Cabin', 'Parch', 'Pclass'}, {'Sex', 'Title'}, {'Age', 'Embarked', 'Fare',
+    'SibSp', 'Survived'}]
+
+    Once the whole dataset is split vertically and a set of features is selected based
+    on mod(partition_id, 3), it is split horizontally into `ceil(num_partitions/3)`
+    partitions. This function returns the horizontal partition with index
+    `partition_id % ceil(num_partitions/3)` of that vertical split, together with the
+    vertical split index.
+    """
+
+    if num_partitions != NUM_VERTICAL_SPLITS:
+        log(
+            WARN,
+            "To run this example with num_partitions other than 3, you need to update how "
+            "the Vertical FL training is performed. This is because the shapes of the "
+            "gradients might not be the same along the first dimension.",
+        )
+
+    # Read whole dataset and process
+    processed_df, features_set = process_dataset()
+
+    # Vertical Split and select
+    v_partitions = _partition_data_vertically(processed_df, features_set)
+    v_split_id = np.mod(partition_id, NUM_VERTICAL_SPLITS)
+    v_partition = v_partitions[v_split_id]
+
+    # Convert to HuggingFace dataset
+    dataset = Dataset.from_pandas(v_partition)
+
+    # Split horizontally with Flower Dataset partitioner
+    num_h_partitions = int(np.ceil(num_partitions / NUM_VERTICAL_SPLITS))
+    partitioner = IidPartitioner(num_partitions=num_h_partitions)
+    partitioner.dataset = dataset
+
+    # Extract partition of the `ClientApp` calling this function
+    partition = partitioner.load_partition(partition_id % num_h_partitions)
+    # `remove_columns` returns a new dataset, so keep the result
+    partition = partition.remove_columns(["Survived"])
+
+    return partition.to_pandas(), v_split_id
+
+
+def _partition_data_vertically(df, all_keywords):
+    partitions = []
+    keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}]
+    keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1])
+
+    for keywords in keywords_sets:
+        partitions.append(
+            df[
+                list(
+                    {
+                        col
+                        for col in df.columns
+                        for kw in keywords
+                        if kw in col or "Survived" in col
+                    }
+                )
+            ]
+        )
+
+    return partitions
+
+
+class ClientModel(nn.Module):
+    def __init__(self, input_size):
+        super().__init__()
+        self.fc = nn.Linear(input_size, 4)
+
+    def forward(self, x):
+        return self.fc(x)
diff --git a/examples/whisper-federated-finetuning/pyproject.toml b/examples/whisper-federated-finetuning/pyproject.toml index 27a89578c5a0..3d7bb023537c 100644 --- a/examples/whisper-federated-finetuning/pyproject.toml +++ b/examples/whisper-federated-finetuning/pyproject.toml @@ -9,7 +9,7 @@ description = "On-device Federated Downstreaming for Speech Classification" authors = ["The Flower Authors "]
 
 [tool.poetry.dependencies]
-python = ">=3.8,<3.11"
+python = ">=3.9,<3.11"
 flwr = { extras = ["simulation"], version = ">=1.0,<2.0" }
 transformers = "4.32.1"
 tokenizers = "0.13.3"
diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml index c9259ffa1db4..4b20edd55047 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -9,7 +9,7 @@ description = "Federated XGBoost with Flower (comprehensive)" authors = ["The Flower Authors "]
 
 [tool.poetry.dependencies]
-python = ">=3.8,<3.11"
+python = ">=3.9,<3.11"
 flwr = { extras = ["simulation"], version = ">=1.7.0,<2.0" }
 flwr-datasets = ">=0.2.0,<1.0.0"
 xgboost = ">=2.0.0,<3.0.0"
diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index fa3e9d0dc6fb..a7b047c090f0 100644 --- a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -4,7 +4,7 @@ dataset:
[HIGGS] framework: [xgboost] --- -# Flower Example using XGBoost +# Federated Learning with XGBoost and Flower (Quickstart Example) This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) within Flower using `xgboost` package. We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset for this example to perform a binary classification task. @@ -12,72 +12,60 @@ Tree-based with bagging method is used for aggregation on the server. This project provides a minimal code example to enable you to get started quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). -## Project Setup +## Set up the project -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/xgboost-quickstart . && rm -rf flower && cd xgboost-quickstart -``` - -This will create a new directory called `xgboost-quickstart` containing the following files: - -``` --- README.md <- Your're reading this right now --- server.py <- Defines the server-side logic --- client.py <- Defines the client-side logic --- run.sh <- Commands to run experiments --- pyproject.toml <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `xgboost` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: +Start by cloning the example project: ```shell -# From a new python environment, run: -pip install . +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/xgboost-quickstart . \ + && rm -rf _tmp \ + && cd xgboost-quickstart ``` -Then, to verify that everything works correctly you can run the following command: +This will create a new directory called `xgboost-quickstart` with the following structure: ```shell -python3 -c "import flwr" +xgboost-quickstart +├── xgboost_quickstart +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your utilities and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -If you don't see any errors you're good to go! +### Install dependencies and project -## Run Federated Learning with XGBoost and Flower +Install the dependencies defined in `pyproject.toml` as well as the `xgboost_quickstart` package. -Afterwards you are ready to start the Flower server as well as the clients. -You can simply start the server in a terminal as follows: - -```shell -python3 server.py +```bash +pip install -e . ``` -Now you are ready to start the Flower clients which will participate in the learning. -To do so simply open two more terminal windows and run the following commands. +## Run the project -Start client 1 in the first terminal: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```shell -python3 client.py --partition-id=0 +### Run with the Simulation Engine + +```bash +flwr run . 
``` -Start client 2 in the second terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python3 client.py --partition-id=1 +```bash +flwr run . --run-config "num-server-rounds=5 params.eta=0.05" ``` -You will see that XGBoost is starting a federated training. - -Alternatively, you can use `run.sh` to run the same experiment in a single terminal as follows: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart XGBoost tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) -```shell -poetry run ./run.sh -``` +### Run with the Deployment Engine -Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) -and [tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py deleted file mode 100644 index d505a7ede785..000000000000 --- a/examples/xgboost-quickstart/client.py +++ /dev/null @@ -1,207 +0,0 @@ -import argparse -import warnings -from logging import INFO -from typing import Union - -import flwr as fl -import xgboost as xgb -from datasets import Dataset, DatasetDict -from flwr.common import ( - Code, - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - GetParametersIns, - GetParametersRes, - Parameters, - Status, -) -from flwr.common.logger import log -from flwr_datasets import FederatedDataset -from flwr_datasets.partitioner import IidPartitioner - -warnings.filterwarnings("ignore", category=UserWarning) - -# Define arguments parser for the client/partition ID. -parser = argparse.ArgumentParser() -parser.add_argument( - "--partition-id", - default=0, - type=int, - help="Partition ID used for the current client.", -) -args = parser.parse_args() - - -# Define data partitioning related functions -def train_test_split(partition: Dataset, test_fraction: float, seed: int): - """Split the data into train and validation set given split rate.""" - train_test = partition.train_test_split(test_size=test_fraction, seed=seed) - partition_train = train_test["train"] - partition_test = train_test["test"] - - num_train = len(partition_train) - num_test = len(partition_test) - - return partition_train, partition_test, num_train, num_test - - -def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: - """Transform dataset to DMatrix format for xgboost.""" - x = data["inputs"] - y = data["label"] - new_data = xgb.DMatrix(x, label=y) - return new_data - - -# Load (HIGGS) dataset and conduct partitioning -# We use a small subset (num_partitions=30) of the dataset for demonstration to speed up the data loading process. 
-partitioner = IidPartitioner(num_partitions=30) -fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) - -# Load the partition for this `partition_id` -log(INFO, "Loading partition...") -partition = fds.load_partition(partition_id=args.partition_id, split="train") -partition.set_format("numpy") - -# Train/test splitting -train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=0.2, seed=42 -) - -# Reformat data to DMatrix for xgboost -log(INFO, "Reformatting data...") -train_dmatrix = transform_dataset_to_dmatrix(train_data) -valid_dmatrix = transform_dataset_to_dmatrix(valid_data) - -# Hyper-parameters for xgboost training -num_local_round = 1 -params = { - "objective": "binary:logistic", - "eta": 0.1, # Learning rate - "max_depth": 8, - "eval_metric": "auc", - "nthread": 16, - "num_parallel_tree": 1, - "subsample": 1, - "tree_method": "hist", -} - - -# Define Flower client -class XgbClient(fl.client.Client): - def __init__( - self, - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ): - self.train_dmatrix = train_dmatrix - self.valid_dmatrix = valid_dmatrix - self.num_train = num_train - self.num_val = num_val - self.num_local_round = num_local_round - self.params = params - - def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: - _ = (self, ins) - return GetParametersRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[]), - ) - - def _local_boost(self, bst_input): - # Update trees based on local training data. - for i in range(self.num_local_round): - bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) - - # Bagging: extract the last N=num_local_round trees for sever aggregation - bst = bst_input[ - bst_input.num_boosted_rounds() - - self.num_local_round : bst_input.num_boosted_rounds() - ] - - return bst - - def fit(self, ins: FitIns) -> FitRes: - global_round = int(ins.config["global_round"]) - if global_round == 1: - # First round local training - bst = xgb.train( - self.params, - self.train_dmatrix, - num_boost_round=self.num_local_round, - evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], - ) - else: - bst = xgb.Booster(params=self.params) - for item in ins.parameters.tensors: - global_model = bytearray(item) - - # Load global model into booster - bst.load_model(global_model) - - # Local training - bst = self._local_boost(bst) - - # Save model - local_model = bst.save_raw("json") - local_model_bytes = bytes(local_model) - - return FitRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), - num_examples=self.num_train, - metrics={}, - ) - - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - # Load global model - bst = xgb.Booster(params=self.params) - for para in ins.parameters.tensors: - para_b = bytearray(para) - bst.load_model(para_b) - - # Run evaluation - eval_results = bst.eval_set( - evals=[(self.valid_dmatrix, "valid")], - iteration=bst.num_boosted_rounds() - 1, - ) - auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - - global_round = ins.config["global_round"] - log(INFO, f"AUC = {auc} at round {global_round}") - - return EvaluateRes( - status=Status( - code=Code.OK, - message="OK", - ), - loss=0.0, - num_examples=self.num_val, - metrics={"AUC": auc}, - ) - - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - 
train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ).to_client(), -) diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml index f1e451fe779a..da3561bfded4 100644 --- a/examples/xgboost-quickstart/pyproject.toml +++ b/examples/xgboost-quickstart/pyproject.toml @@ -3,17 +3,45 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "quickstart-xgboost" -version = "0.1.0" -description = "XGBoost Federated Learning Quickstart with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "xgboost_quickstart" +version = "1.0.0" +description = "Federated Learning with XGBoost and Flower (Quickstart Example)" +license = "Apache-2.0" dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets>=0.1.0,<1.0.0", - "xgboost>=2.0.0,<3.0.0", + "flwr-nightly[simulation]==1.11.0.dev20240826", + "flwr-datasets>=0.3.0", + "xgboost>=2.0.0", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "xgboost_quickstart.server_app:app" +clientapp = "xgboost_quickstart.client_app:app" + +[tool.flwr.app.config] +# ServerApp +num-server-rounds = 3 +fraction-fit = 0.1 +fraction-evaluate = 0.1 + +# ClientApp +local-epochs = 1 +params.objective = "binary:logistic" +params.eta = 0.1 # Learning rate +params.max-depth = 8 +params.eval-metric = "auc" +params.nthread = 16 +params.num-parallel-tree = 1 +params.subsample = 1 +params.tree-method = "hist" + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 20 diff --git a/examples/xgboost-quickstart/run.sh b/examples/xgboost-quickstart/run.sh deleted file mode 100755 index b35af58222ab..000000000000 --- a/examples/xgboost-quickstart/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 5 # Sleep for 5s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python3 client.py --partition-id=$i & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/xgboost-quickstart/server.py b/examples/xgboost-quickstart/server.py deleted file mode 100644 index 2246d32686a4..000000000000 --- a/examples/xgboost-quickstart/server.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Dict - -import flwr as fl -from flwr.server.strategy import FedXgbBagging - -# FL experimental settings -pool_size = 2 -num_rounds = 5 -num_clients_per_round = 2 -num_evaluate_clients = 2 - - -def evaluate_metrics_aggregation(eval_metrics): - """Return an aggregated metric (AUC) for evaluation.""" - total_num = sum([num for num, _ in eval_metrics]) - auc_aggregated = ( - sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num - ) - metrics_aggregated = {"AUC": auc_aggregated} - return metrics_aggregated - - -def config_func(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -# Define strategy -strategy = FedXgbBagging( - fraction_fit=(float(num_clients_per_round) / pool_size), - min_fit_clients=num_clients_per_round, - min_available_clients=pool_size, - min_evaluate_clients=num_evaluate_clients, - fraction_evaluate=1.0, 
- evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=config_func, - on_fit_config_fn=config_func, -) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=num_rounds), - strategy=strategy, -) diff --git a/examples/xgboost-quickstart/xgboost_quickstart/__init__.py b/examples/xgboost-quickstart/xgboost_quickstart/__init__.py new file mode 100644 index 000000000000..470360b377a6 --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/__init__.py @@ -0,0 +1 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" diff --git a/examples/xgboost-quickstart/xgboost_quickstart/client_app.py b/examples/xgboost-quickstart/xgboost_quickstart/client_app.py new file mode 100644 index 000000000000..3aa199a10274 --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/client_app.py @@ -0,0 +1,139 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" + +import warnings + +from flwr.common.context import Context + +import xgboost as xgb +from flwr.client import Client, ClientApp +from flwr.common.config import unflatten_dict +from flwr.common import ( + Code, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + Parameters, + Status, +) + +from xgboost_quickstart.task import load_data, replace_keys + +warnings.filterwarnings("ignore", category=UserWarning) + + +# Define Flower Client and client_fn +class FlowerClient(Client): + def __init__( + self, + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + ): + self.train_dmatrix = train_dmatrix + self.valid_dmatrix = valid_dmatrix + self.num_train = num_train + self.num_val = num_val + self.num_local_round = num_local_round + self.params = params + + def _local_boost(self, bst_input): + # Update trees based on local training data. 
+ for i in range(self.num_local_round): + bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) + + # Bagging: extract the last N=num_local_round trees for sever aggregation + bst = bst_input[ + bst_input.num_boosted_rounds() + - self.num_local_round : bst_input.num_boosted_rounds() + ] + + return bst + + def fit(self, ins: FitIns) -> FitRes: + global_round = int(ins.config["global_round"]) + if global_round == 1: + # First round local training + bst = xgb.train( + self.params, + self.train_dmatrix, + num_boost_round=self.num_local_round, + evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], + ) + else: + bst = xgb.Booster(params=self.params) + global_model = bytearray(ins.parameters.tensors[0]) + + # Load global model into booster + bst.load_model(global_model) + + # Local training + bst = self._local_boost(bst) + + # Save model + local_model = bst.save_raw("json") + local_model_bytes = bytes(local_model) + + return FitRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), + num_examples=self.num_train, + metrics={}, + ) + + def evaluate(self, ins: EvaluateIns) -> EvaluateRes: + # Load global model + bst = xgb.Booster(params=self.params) + para_b = bytearray(ins.parameters.tensors[0]) + bst.load_model(para_b) + + # Run evaluation + eval_results = bst.eval_set( + evals=[(self.valid_dmatrix, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + return EvaluateRes( + status=Status( + code=Code.OK, + message="OK", + ), + loss=0.0, + num_examples=self.num_val, + metrics={"AUC": auc}, + ) + + +def client_fn(context: Context): + # Load model and data + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + train_dmatrix, valid_dmatrix, num_train, num_val = load_data( + partition_id, num_partitions + ) + + cfg = replace_keys(unflatten_dict(context.run_config)) + num_local_round = cfg["local_epochs"] + + # Return Client instance + return FlowerClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + cfg["params"], + ) + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/xgboost-quickstart/xgboost_quickstart/server_app.py b/examples/xgboost-quickstart/xgboost_quickstart/server_app.py new file mode 100644 index 000000000000..6b81c6caa785 --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/server_app.py @@ -0,0 +1,54 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" + +from typing import Dict + +from flwr.common import Context, Parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedXgbBagging + + +def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + + +def config_func(rnd: int) -> Dict[str, str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + fraction_evaluate = context.run_config["fraction-evaluate"] + + # Init an empty 
Parameter + parameters = Parameters(tensor_type="", tensors=[]) + + # Define strategy + strategy = FedXgbBagging( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp( + server_fn=server_fn, +) diff --git a/examples/xgboost-quickstart/xgboost_quickstart/task.py b/examples/xgboost-quickstart/xgboost_quickstart/task.py new file mode 100644 index 000000000000..09916d9ac04a --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/task.py @@ -0,0 +1,71 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" + +from logging import INFO + +import xgboost as xgb +from flwr.common import log +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +def train_test_split(partition, test_fraction, seed): + """Split the data into train and validation set given split rate.""" + train_test = partition.train_test_split(test_size=test_fraction, seed=seed) + partition_train = train_test["train"] + partition_test = train_test["test"] + + num_train = len(partition_train) + num_test = len(partition_test) + + return partition_train, partition_test, num_train, num_test + + +def transform_dataset_to_dmatrix(data): + """Transform dataset to DMatrix format for xgboost.""" + x = data["inputs"] + y = data["label"] + new_data = xgb.DMatrix(x, label=y) + return new_data + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id, num_clients): + """Load partition HIGGS data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_clients) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + ) + + # Load the partition for this `partition_id` + partition = fds.load_partition(partition_id, split="train") + partition.set_format("numpy") + + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=0.2, seed=42 + ) + + # Reformat data to DMatrix for xgboost + log(INFO, "Reformatting data...") + train_dmatrix = transform_dataset_to_dmatrix(train_data) + valid_dmatrix = transform_dataset_to_dmatrix(valid_data) + + return train_dmatrix, valid_dmatrix, num_train, num_val + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/glossary/aggregation.mdx b/glossary/aggregation.mdx new file mode 100644 index 000000000000..82cadd6948bb --- /dev/null +++ b/glossary/aggregation.mdx @@ -0,0 +1,18 @@ +--- +title: "Aggregation" +description: "Combine model weights from sampled clients to update the global model. This process enables the global model to learn from each client's data." 
+date: "2024-05-23" +author: + name: "Charles Beauville" + position: "Machine Learning Engineer" + website: "https://www.linkedin.com/in/charles-beauville/" + github: "github.com/charlesbvll" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +During each Federated Learning round, the server will receive model weights from sampled clients and needs a function to improve its global model using those weights. This is what is called `aggregation`. It can be a simple weighted average function (like `FedAvg`), or can be more complex (e.g. incorporating optimization techniques). The aggregation is where FL's magic happens, it allows the global model to learn and improve from each client's particular data distribution with only their trained weights. + diff --git a/glossary/client.mdx b/glossary/client.mdx new file mode 100644 index 000000000000..52b14f124add --- /dev/null +++ b/glossary/client.mdx @@ -0,0 +1,17 @@ +--- +title: "Client" +description: "A client is any machine with local data that connects to a server, trains on received global model weights, and sends back updated weights. Clients may also evaluate global model weights." +date: "2024-05-23" +author: + name: "Charles Beauville" + position: "Machine Learning Engineer" + website: "https://www.linkedin.com/in/charles-beauville/" + github: "github.com/charlesbvll" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Any machine with access to some data that connects to a server to perform Federated Learning. During each round of FL (if it is sampled), it will receive global model weights from the server, train on the data they have access to, and send the resulting trained weights back to the server. Clients can also be sampled to evaluate the global server weights on the data they have access to, this is called federated evaluation. diff --git a/glossary/docker.mdx b/glossary/docker.mdx new file mode 100644 index 000000000000..9ca079b90f06 --- /dev/null +++ b/glossary/docker.mdx @@ -0,0 +1,22 @@ +--- +title: "Docker" +description: "Docker is a containerization tool that allows for consistent and reliable deployment of applications across different environments." +date: "2024-07-08" +author: + name: "Robert Steiner" + position: "DevOps Engineer at Flower Labs" + website: "https://github.com/Robert-Steiner" +--- + +Docker is an open-source containerization tool for deploying and running applications. Docker +containers encapsulate an application's code, dependencies, and configuration files, allowing +for consistent and reliable deployment across different environments. + +In the context of federated learning, Docker containers can be used to package the entire client +and server application, including all the necessary dependencies, and then deployed on various +devices such as edge devices, cloud servers, or even on-premises servers. + +In Flower, Docker containers are used to containerize various applications like `SuperLink`, +`SuperNode`, and `SuperExec`. Flower's Docker images allow users to quickly get Flower up and +running, reducing the time and effort required to set up and configure the necessary software +and dependencies. 
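Editor's aside (not part of the patch): the `aggregation` glossary entry above describes FedAvg-style weighted averaging in prose only. A minimal, self-contained sketch of that idea, with illustrative names and NumPy arrays standing in for model layers; client results are assumed to be `(num_examples, list_of_ndarrays)` pairs:

```python
from typing import List, Tuple

import numpy as np


def weighted_average(results: List[Tuple[int, List[np.ndarray]]]) -> List[np.ndarray]:
    """Average client updates layer by layer, weighted by local example counts."""
    total_examples = sum(num_examples for num_examples, _ in results)
    num_layers = len(results[0][1])
    return [
        sum(num_examples * weights[layer] for num_examples, weights in results)
        / total_examples
        for layer in range(num_layers)
    ]


# Two hypothetical clients: one with 100 local examples, one with 300
client_a = (100, [np.ones((2, 2)), np.zeros(3)])
client_b = (300, [np.zeros((2, 2)), np.ones(3)])
new_global = weighted_average([client_a, client_b])
print(new_global[0])  # 2x2 array of 0.25 (client_a contributes 100/400)
print(new_global[1])  # length-3 array of 0.75 (client_b contributes 300/400)
```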
diff --git a/glossary/edge-computing.mdx b/glossary/edge-computing.mdx
new file mode 100644
index 000000000000..6499a48e8f07
--- /dev/null
+++ b/glossary/edge-computing.mdx
@@ -0,0 +1,40 @@
+---
+title: "Edge Computing"
+description: "Edge computing is a distributed computing concept of bringing compute and data storage as close as possible to the source of data generation and consumption by users."
+date: "2024-09-10"
+author:
+  name: "Chong Shen Ng"
+  position: "Research Engineer @ Flower Labs"
+  website: "https://discuss.flower.ai/u/chongshenng"
+  github: "github.com/chongshenng"
+related:
+  - text: "IoT"
+    link: "/glossary/iot"
+  - text: "Run Flower using Docker"
+    link: "/docs/framework/docker/index.html"
+  - text: "Flower Clients in C++"
+    link: "/docs/examples/quickstart-cpp.html"
+  - text: "Federated Learning on Embedded Devices with Flower"
+    link: "/docs/examples/embedded-devices.html"
+---
+
+### Introduction to Edge Computing
+
+Edge computing is a distributed computing concept of bringing compute and data storage as close as possible to the source of data generation and consumption by users. By performing computation close to the data source, edge computing aims to address limitations typically encountered in centralized computing, such as bandwidth, latency, privacy, and autonomy.
+
+Edge computing works alongside cloud and fog computing, but each serves different purposes. Cloud computing delivers on-demand resources like data storage, servers, analytics, and networking via the Internet. Fog computing, however, brings computing closer to devices by distributing communication and computation across clusters of IoT or edge devices. While edge computing is sometimes used interchangeably with fog computing, edge computing specifically handles data processing directly at or near the devices themselves, whereas fog computing distributes tasks across multiple nodes, bridging the gap between edge devices and the cloud.
+
+### Advantages and Use Cases of Edge Computing
+
+The key benefit of edge computing is that the volume of data moved is significantly reduced, because computation runs directly on the device using the data it acquires. This reduces the amount of long-distance communication between machines, which improves latency and reduces transmission costs. Examples of edge computing that benefit from offloading computation include:
+1. Smart watches and fitness monitors that measure live health metrics.
+2. Facial recognition and wake word detection on smartphones.
+3. Real-time lane departure warning systems in road transport that detect lane lines using on-board videos and sensors.
+
+### Federated Learning in Edge Computing
+
+When deploying federated learning systems, edge computing is an important component to consider. Edge devices typically take the role of "clients" in federated learning. In a healthcare use case, servers in different hospitals can train models on their local data. In mobile computing, smartphones perform local training (and inference) on user data, such as for next-word prediction.
+
+### Edge Computing with Flower
+
+With the Flower framework, you can easily deploy federated learning workflows and maximise the use of edge computing resources. Flower provides the infrastructure to perform federated learning, federated evaluation, and federated analytics, all in an easy, scalable, and secure way.
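To give a flavour of what the client side looks like on such edge devices, below is a minimal, hypothetical Flower `ClientApp` sketch in Python. The model helpers (`get_weights`, `set_weights`, `train_one_epoch`, `load_local_data`) are assumed to be defined elsewhere in the app, and exact signatures can differ between Flower versions, so treat this as a structural sketch rather than a drop-in example.

```python
from flwr.client import ClientApp, NumPyClient

# Hypothetical helpers provided by your own application code
from my_edge_model import get_weights, set_weights, train_one_epoch, load_local_data


class EdgeClient(NumPyClient):
    """Runs on the edge device and trains on locally acquired data."""

    def fit(self, parameters, config):
        set_weights(parameters)          # load the received global model
        train_data = load_local_data()   # data never leaves the device
        train_one_epoch(train_data)
        return get_weights(), len(train_data), {}


def client_fn(context):
    return EdgeClient().to_client()


# The ClientApp is executed on the device by a Flower SuperNode
app = ClientApp(client_fn=client_fn)
```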
Start with our tutorial on running Federated Learning on Embedded Devices (link [here](https://github.com/adap/flower/tree/main/examples/embedded-devices)), which shows you how to run Flower on NVidia Jetson devices and Raspberry Pis as your edge compute. diff --git a/glossary/evaluation.mdx b/glossary/evaluation.mdx new file mode 100644 index 000000000000..bf6b36cd0c4b --- /dev/null +++ b/glossary/evaluation.mdx @@ -0,0 +1,19 @@ +--- +title: "Evaluation" +description: "Evaluation measures how well the trained model performs by testing it on each client's local data, providing insights into its generalizability across varied data sources." +date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Evaluation in machine learning is the process of assessing a model's performance on unseen data to determine its ability to generalize beyond the training set. This typically involves using a separate test set and various metrics like accuracy or F1-score to measure how well the model performs on new data, ensuring it isn't overfitting or underfitting. + +In federated learning, evaluation (or distributed evaluation) refers to the process of assessing a model's performance across multiple clients, such as devices or data centers. Each client evaluates the model locally using its own data and then sends the results to the server, which aggregates all the evaluation outcomes. This process allows for understanding how well the model generalizes to different data distributions without centralizing sensitive data. \ No newline at end of file diff --git a/glossary/federated-learning.mdx b/glossary/federated-learning.mdx new file mode 100644 index 000000000000..5f6b8a7f1732 --- /dev/null +++ b/glossary/federated-learning.mdx @@ -0,0 +1,14 @@ +--- +title: "Federated Learning" +description: "Federated Learning is a machine learning approach where model training occurs on decentralized devices, preserving data privacy and leveraging local computations." +date: "2024-05-23" +author: + name: "Julian Rußmeyer" + position: "UX/UI Designer" + website: "https://www.linkedin.com/in/julian-russmeyer/" +related: + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Federated learning is an approach to machine learning in which the model is trained on multiple decentralized devices or servers with local data samples without exchanging them. Instead of sending raw data to a central server, updates to the model are calculated locally and only the model parameters are aggregated centrally. In this way, user privacy is maintained and communication costs are reduced, while collaborative model training is enabled. diff --git a/glossary/grpc.mdx b/glossary/grpc.mdx new file mode 100644 index 000000000000..af58758d10bd --- /dev/null +++ b/glossary/grpc.mdx @@ -0,0 +1,44 @@ +--- +title: "gRPC" +description: "gRPC is an inter-process communication technology for building distributed apps. It allows developers to connect, invoke, operate, and debug apps as easily as making a local function call." 
+date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" + - text: "Protocol Buffers" + link: "/glossary/protocol-buffers" + - text: "Google: gRPC - A true internet scale RPC framework" + link: "https://cloud.google.com/blog/products/gcp/grpc-a-true-internet-scale-rpc-framework-is-now-1-and-ready-for-production-deployments" +--- + +### Introduction to gRPC + +gRPC is an inter-process communication technology for building distributed applications. It allows you to connect, invoke, operate, and debug these applications as easily as making a local function call. It can efficiently connect services in and across data centers. It is also applicable in the last mile of distributed computing to connect devices, mobile applications, and browsers to backend services. Supporting various languages like C++, Go, Java, and Python, and platforms like Android and the web, gRPC is a versatile framework for any environment. + +Google first [open-sourced gRPC in 2016](https://cloud.google.com/blog/products/gcp/grpc-a-true-internet-scale-rpc-framework-is-now-1-and-ready-for-production-deployments), basing it on their internal remote procedure call (RPC) framework, Stubby, designed to handle tens of billions of requests per second. Built on HTTP/2 and protocol buffers, gRPC is a popular high-performance framework for developers to built micro-services. Notable early adopters of gRPC include Square, Netflix, CockroachDB, Cisco, and Juniper Networks. + +By default, gRPC uses protocol buffers - Google's language-neutral and platform-neutral mechanism for efficiently serializing structured data - as its interface definition language and its underlying message interchange format. The recommended protocol buffer version as of writing is `proto3`, though other formats like JSON can also be used. + +### How does it work? + +gRPC operates similarly to many RPC systems. First, you specify the methods that can be called remotely on the server application, along with their parameters and return type. Then, with the appropriate code (more on this below), a gRPC client application can directly call these methods on the gRPC server application on a different machine as if it were a local object. Note that the definitions of client and server in gRPC is different to federated learning. For clarity, we will refer to client (server) applications in gRPC as gRPC client (server) applications. + +To use gRPC, follow these steps: +1. Define structure for the data you want to serialize in a proto file definition. `*.proto`. +2. Run the protocol buffer compiler `protoc` to generate to data access classes in the preferred language from the `*.proto` service definitions. This step generates the gRPC client and server code, as well as the regular protocol buffer code for handling your message types. +3. Use the generated class in your application to populate, serialize, and retrieve the class protocol buffer messages. + +### Use cases in Federated Learning + +There are several reasons why gRPC is particularly useful in federated learning. First, clients and server in a federation rely on stable and efficient communication. 
Using Protobuf, a highly efficient binary serialization format, gRPC overcomes the bandwidth limitations in federated learning, such as in low-bandwidth mobile connections. Second, gRPC’s language-independent communication allows developers to use a variety of programming languages, enabling broader adoption for on-device executions. + +### gRPC in Flower + +gRPC's benefits for distributed computing make it a natural choice for the Flower framework. Flower uses gRPC as its primary communication protocol. To make it easier to build your federated learning systems, we have introduced high-level APIs to take care of the serialization and deserialization of the model parameters, configurations, and metrics. For more details on how to use Flower, follow our "Get started with Flower" tutorial here. diff --git a/glossary/inference.mdx b/glossary/inference.mdx new file mode 100644 index 000000000000..06c93a834d2d --- /dev/null +++ b/glossary/inference.mdx @@ -0,0 +1,21 @@ +--- +title: "Inference" +description: "Inference is the phase in which a trained machine learning model applies its learned patterns to new, unseen data to make predictions or decisions." +date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Inference, also known as model prediction, is the stage in the machine learning workflow where a trained model is used to make predictions based on new, unseen data. In a typical machine learning setting, model inference involves the following steps: model loading, where the trained model is loaded into the application or service where it will be used; data preparation, which preprocess the new data in the same way as the training data; and model prediction, where the prepared data is fed into the model to compute outputs based on the learned patterns during training. + +In the context of federated learning (FL), inference can be performed locally on the user's device. A global model updated from FL process is deployed and loaded on individual nodes (e.g., smartphones, hospital servers) for local inference. This allows for keeping all data on-device, enhancing privacy and reducing latency. diff --git a/glossary/iot.mdx b/glossary/iot.mdx new file mode 100644 index 000000000000..ec1932c444f3 --- /dev/null +++ b/glossary/iot.mdx @@ -0,0 +1,48 @@ +--- +title: "IoT" +description: "The Internet of Things (IoT) refers to devices with sensors, software, and tech that connect and exchange data with other systems via the internet or communication networks." 
+date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Edge Computing" + link: "/glossary/edge-computing" + - text: "Run Flower using Docker" + link: "/docs/framework/docker/index.html" + - text: "Flower Clients in C++" + link: "/docs/examples/quickstart-cpp.html" + - text: "Federated Learning on Embedded Devices with Flower" + link: "/docs/examples/embedded-devices.html" + - text: "Cisco: Redefine Connectivity by Building a Network to Support the Internet of Things" + link: "https://www.cisco.com/c/en/us/solutions/service-provider/a-network-to-support-iot.html" +--- + +### Introduction to IoT + +The Internet of Things (IoT) describe devices with sensors, processing ability, software, and other technologies that connect and exchange data with other devices and systems over the Internet or other communications networks. IoT is often also referred as Machine-to-Machine (M2M) connections. Examples of IoT include embedded systems, wireless sensor networks, control systems, automation (home and building). In the consumer market, IoT technology is synonymous with smart home products. The IoT architecture bears resemblance to edge computing, but more broadly encompasses edge devices, gateways, and the cloud. + +### Use cases in Federated Learning + +From the perspective of federated learning, IoT systems provide two common configurations: first as a data source for training, and second as a point for running inference/analytics. + +Cisco's Global Cloud Index estimated that nearly 850 Zettabytes (ZB) of data will be generated by all people, machines and things in 2021 ([link](https://www.cisco.com/c/en/us/solutions/service-provider/a-network-to-support-iot.html) to article). In IoT, the data is different because not all of the data needs to be stored and instead, the most impactful business values come from running computations on the data. This positions IoT as an ideal candidate for implementing federated learning systems, where a model trained on a datastream from a single device may not be useful, but when trained collaboratively on hundreds or thousands of devices, yields a better performing and generalisable model. The key benefit is that the generated data remains local on the device and can even be offloaded after multiple rounds of federated learning. Some examples are presented below. + +Once a model is trained (e.g. in a federated way), the model can be put into production. What this means is to deploy the model on the IoT device and compute predictions based on the newly generated/acquired data. + +Federated learning in IoT can be organized on two axes: by industry and by use cases. + +For industry applications, examples include: +1. Healthcare - e.g. vital sign, activity levels, or sleep pattern monitoring using fitness trackers. +2. Transportation - e.g. trajectory prediction, object detection, driver drowsiness detection using on-board sensors and cameras. + +For use cases, examples include: +1. Predictive maintenance - e.g. using data acquired from physical sensors (impedence, temperature, vibration, pressure, viscosity, etc ...) +2. Anomaly detection - e.g. using environmental monitoring sensors for predicting air, noise, or water pollution, using internet network traffic data for network intrusion detection, using fiber optic sensors for remote sensing and monitoring, etc ... +3. Quality assurance and quality control - e.g. 
using in-line optical, acoustic, or sensor data during manufacturing processes to identify faulty products, etc ... + +### Using Flower for Federated Learning with IoT + +Flower is developed with a deployment engine that allows you to easily deploy your federated learning system on IoT devices. As a Data Scientist/ML Engineer, you will only need to write ClientApps and deploy them to IoT devices without needing to deal with the infrastructure and networking. To further help deployment, we provide [Docker images](https://hub.docker.com/u/flwr) for the SuperLink, SuperNode, and ServerApp so that you can easily ship the requirements of your Flower applications in containers in a production environment. Lastly, Flower supports the development of both Python and C++ clients, which provides developers with flexible ways of building ClientApps for resource-contrained devices. diff --git a/glossary/medical-ai.mdx b/glossary/medical-ai.mdx new file mode 100644 index 000000000000..d557f457c189 --- /dev/null +++ b/glossary/medical-ai.mdx @@ -0,0 +1,24 @@ +--- +title: "Medical AI" +description: "Medical AI involves the application of artificial intelligence technologies to healthcare, enhancing diagnosis, treatment planning, and patient monitoring by analyzing complex medical data." +date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Medical AI refers to the application of artificial intelligence technologies, particularly machine learning algorithms, to medical and healthcare-related fields. This includes, but is not limited to, tasks such as disease diagnosis, personalized treatment plans, drug development, medical imaging analysis, and healthcare management. The goal of Medical AI is to enhance healthcare services, improve treatment outcomes, reduce costs, and increase efficiency within healthcare systems. + +Federated learning (FL) introduces a novel approach to the training of machine learning models across multiple decentralized devices or servers holding local data samples, without exchanging them. This is particularly appropriate in the medical field due to the sensitive nature of medical data and strict privacy requirements. It leverages the strength of diverse datasets without compromising patient confidentiality, making it an increasingly popular choice in Medical AI applications. + +#### Medical AI in Flower +Flower, a friendly FL framework, is developing a more versatile and privacy-enhancing solution for Medical AI through the use of FL. Please check out [Flower industry healthcare](flower.ai/industry/healthcare) website for more detailed information. diff --git a/glossary/model-training.mdx b/glossary/model-training.mdx new file mode 100644 index 000000000000..ba5923962f1b --- /dev/null +++ b/glossary/model-training.mdx @@ -0,0 +1,24 @@ +--- +title: "Model Training" +description: "Model training is the process of teaching an algorithm to learn from data to make predictions or decisions." 
+date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Model training is a core component of developing machine learning (ML) systems, where an algorithm learns from data to make predictions or decisions. A typical model training process involves several key steps: dataset preparation, feature selection and engineering, choice of model based on the task (e.g., classification, regression), choice of training algorithm (e.g. optimizer), and model iteration for updating its weights and biases to minimize the loss function, which measures the difference between the predicted and actual outcomes on the training data. The traditional ML model training process typically involves considerable manual effort, whereas deep learning (DL) offers an end-to-end automated process. + +This approach assumes easy access to data and often requires substantial computational resources, depending on the size of the dataset and complexity of the model. However, large amounts of the data in the real world is distributed and protected due to privacy concerns, making it inaccessible for typical (centralized) model training. Federated learning (FL) migrates the model training from data center to local user ends. After local training, each participant sends only their model's updates (not the data) to a central server for aggregation. The updated global model is sent back to the participants for further rounds of local training and updates. This way, the model training benefits from diverse, real-world data without compromising individual data privacy. + +#### Model training in Flower +Flower, a friendly FL framework, offers a wealth of model training examples and baselines tailored for federated environments. Please refer to the [examples](https://flower.ai/docs/examples/) and [baselines](https://flower.ai/docs/baselines/) documentation for more detailed information. diff --git a/glossary/platform-independence.mdx b/glossary/platform-independence.mdx new file mode 100644 index 000000000000..9582fae057ff --- /dev/null +++ b/glossary/platform-independence.mdx @@ -0,0 +1,19 @@ +--- +title: "Platform Independence" +description: "The capability to run program across different hardware and operating systems." +date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" +--- + +Platform independence in federated learning refers to the capability of machine learning systems to operate seamlessly across various hardware and operating system environments. This ensures that the federated learning process can function effectively on various devices with different operating systems such as Windows, Linux, Mac OS, iOS, and Android without requiring platform-specific modifications. By achieving platform independence, federated learning frameworks enable efficient data analysis and model training across heterogeneous edge devices, enhancing scalability and flexibility in distributed machine learning scenarios. 
+ +### Platform Independence in Flower + +Flower is interoperable with different operating systems and hardware platforms to work well in heterogeneous edge device environments. \ No newline at end of file diff --git a/glossary/protocol-buffers.mdx b/glossary/protocol-buffers.mdx new file mode 100644 index 000000000000..7e9bf6c7bbc7 --- /dev/null +++ b/glossary/protocol-buffers.mdx @@ -0,0 +1,31 @@ +--- +title: "Protocol Buffers" +description: "Protocol Buffers, often abbreviated as Protobuf, are a language-neutral, platform-neutral, extensible mechanism for serializing structured data, similar to XML but smaller, faster, and simpler." +date: "2024-05-24" +author: + name: "Taner Topal" + position: "Co-Creator and CTO @ Flower Labs" + website: "https://www.linkedin.com/in/tanertopal/" + github: "github.com/tanertopal" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +### Introduction to Protocol Buffers + +Protocol Buffers, often abbreviated as Protobuf, are a language-neutral, platform-neutral, extensible mechanism for serializing structured data, similar to XML but smaller, faster, and simpler. The method involves defining how you want your data to be structured once, then using language specific generated source code to write and read structured data to and from a variety of data streams. + +### How Protocol Buffers Work + +Protocol Buffers require a `.proto` file where the data structure (the messages) is defined. This is essentially a schema describing the data to be serialized. Once the `.proto` file is prepared, it is compiled using the Protobuf compiler (`protoc`), which generates data access classes in supported languages like Java, C++, Python, Swift, Kotlin, and more. These classes provide simple accessors for each field (like standard getters and setters) and methods to serialize the entire structure to a binary format that can be easily transmitted over network protocols or written to a file. + +### Advantages and Use Cases + +The primary advantages of Protocol Buffers include their simplicity, efficiency, and backward compatibility. They are more efficient than XML or JSON as they serialize to a binary format, which makes them both smaller and faster. They support backward compatibility, allowing to modify data structures without breaking deployed programs that are communicating using the protocol. This makes Protobuf an excellent choice for data storage or RPC (Remote Procedure Call) applications where small size, low latency, and schema evolution are critical. + +### Protocol Buffers in Flower + +In the context of Flower, Protocol Buffers play a crucial role in ensuring efficient and reliable communication between the server and clients. Federated learning involves heterogeneous clients (e.g., servers, mobile devices, edge devices) running different environments and programming languages. This setup requires frequent exchanges of model updates and other metadata between the server and clients. Protocol Buffers, with their efficient binary serialization, enable Flower to handle these exchanges with minimal overhead, ensuring low latency and reducing the bandwidth required for communication. Moreover, the backward compatibility feature of Protobuf allows Flower to evolve and update its communication protocols without disrupting existing deployments. 
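For illustration, the sketch below shows what working with Protobuf directly looks like in Python: a message class generated by `protoc` is populated, serialized to a compact binary format, and parsed back on the receiving side. The `task_pb2` module and its `Task` message (with `task_id` and `payload` fields) are hypothetical stand-ins for whatever a `.proto` file defines.

```python
# Hypothetical module generated by `protoc` from a task.proto definition
import task_pb2

# Populate a message using the generated accessors
task = task_pb2.Task()
task.task_id = "42"
task.payload = b"\x00\x01\x02"

# Serialize to bytes for transmission over the network or storage
wire_bytes = task.SerializeToString()

# Parse the bytes back into a message on the receiving side
received = task_pb2.Task()
received.ParseFromString(wire_bytes)
assert received.task_id == "42"
```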
Best of all, Flower users typically do not have to deal directly with Protobuf, as Flower provides language-specific abstractions that simplify interaction with the underlying communication protocols. diff --git a/glossary/scalability.mdx b/glossary/scalability.mdx new file mode 100644 index 000000000000..4bfb736ff08c --- /dev/null +++ b/glossary/scalability.mdx @@ -0,0 +1,22 @@ +--- +title: "Scalability" +description: "Scalability ensures systems grow with demand. In Federated Learning, it involves efficiently managing dynamic clients and diverse devices. Flower supports large-scale FL on various devices/ resources." +date: "2024-05-23" +author: + name: "Daniel Nata Nugraha" + position: "Software Engineer" + image: "daniel_nata_nugraha.png" + website: "https://www.linkedin.com/in/daniel-nugraha/" + github: "github.com/danielnugraha" +related: + - text: "Flower Paper" + link: "https://arxiv.org/pdf/2007.14390" + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Scalability is the ability of a system, network, or process to accommodate an increasing amount of work. This involves adding resources (like servers) or optimizing existing ones to maintain or enhance performance. There are two main types of scalability: horizontal scalability (adding more nodes, such as servers) and vertical scalability (adding more power to existing nodes, like increasing CPU or RAM). Ideally, a scalable system can do both, seamlessly adapting to increased demands without significant downtime. Scalability is essential for businesses to grow while ensuring services remain reliable and responsive. +Scalability in Federated Learning involves managing dynamic client participation, as clients may join or leave unpredictably. This requires algorithms that adapt to varying availability and efficiently aggregate updates from numerous models. Additionally, scalable federated learning systems must handle heterogeneous client devices with different processing powers, network conditions, and data distributions, ensuring balanced contributions to the global model. +Scalability in Flower means efficiently conducting large-scale federated learning (FL) training and evaluation. Flower enables researchers to launch FL experiments with many clients using reasonable computing resources, such as a single machine or a multi-GPU rack. Flower supports scaling workloads to millions of clients, including diverse devices like Raspberry Pis, Android and iOS mobile devices, laptops, etc. It offers complete control over connection management and includes a virtual client engine for large-scale simulations. diff --git a/glossary/server.mdx b/glossary/server.mdx new file mode 100644 index 000000000000..efc25a227791 --- /dev/null +++ b/glossary/server.mdx @@ -0,0 +1,17 @@ +--- +title: "Server" +description: "The central entity coordinating the aggregation of local model updates from multiple clients to build a comprehensive, privacy-preserving global model." +date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Client" + link: "/glossary/client" + - text: "Federated Learning" + link: "/glossary/federated-learning" +--- + +A server in federated learning plays a pivotal role by managing the distributed training process across various clients. 
Each client independently trains its local model using its local data and then sends the model updates to the server. The server aggregates the received updates to create a new global model, which is subsequently sent back to the clients. This iterative process allows the global model to improve over time without the need for the clients to share their raw data, ensuring data privacy and minimizing data transfer.
\ No newline at end of file
diff --git a/glossary/xgboost.mdx b/glossary/xgboost.mdx
new file mode 100644
index 000000000000..51b5a2912e0b
--- /dev/null
+++ b/glossary/xgboost.mdx
@@ -0,0 +1,34 @@
+---
+title: "XGBoost"
+description: "XGBoost - or eXtreme Gradient Boosting - is an open-source library providing a regularizing gradient boosting decision tree framework for many programming languages including Python, C++, and Java."
+date: "2024-09-10"
+author:
+  name: "Chong Shen Ng"
+  position: "Research Engineer @ Flower Labs"
+  website: "https://discuss.flower.ai/u/chongshenng"
+  github: "github.com/chongshenng"
+related:
+  - text: "Quickstart Federated Learning with XGBoost and Flower"
+    link: "/docs/framework/tutorial-quickstart-xgboost.html"
+  - text: "Flower Example using XGBoost (Comprehensive)"
+    link: "/docs/examples/xgboost-comprehensive.html"
+---
+
+### Introduction to XGBoost
+
+XGBoost - or eXtreme Gradient Boosting - is an open-source library which provides a regularizing gradient boosting framework for Python, C++, Java, R, Julia, Perl, and Scala. It implements machine learning algorithms based on the gradient boosting concept, where a single model is created from an ensemble of weak learners (decision trees). This is commonly referred to as Gradient Boosting Decision Trees (GBDT), a decision tree ensemble learning algorithm.
+
+GBDTs are commonly compared with the random forest algorithm. They are similar in the sense that they build multiple decision trees. But the key differences are in how they are built and combined. Random forest first builds full decision trees in parallel from bootstrap samples of the dataset, and then generates the final prediction based on an average of all of the predictions. In contrast, GBDT iteratively trains decision trees with the objective that each subsequent tree reduces the error residuals of the previous model - this is the concept of boosting. The final prediction in a GBDT is a weighted sum of all of the tree predictions. While the bootstrap aggregation method of random forest minimizes variance and overfitting, the boosting method of GBDT minimizes bias and underfitting.
+
+XGBoost includes many features that optimize the implementation of GBDT, including parallelized tree training (instead of sequential training) and integration with distributed processing frameworks like Apache Spark and Dask. These performance improvements have historically made XGBoost the preferred framework for supervised learning tasks, and it has seen widespread success in Kaggle competitions on structured data.
+
+### Use cases in Federated Learning
+
+While there is no way to know beforehand what model would perform best in federated learning, XGBoost is appealing for several reasons:
+1. To train the first model, XGBoost hyperparameters require significantly less tuning compared to neural network-based models.
+2.
XGBoost is known to produce models that perform far better than neural networks on tabular datasets, which can be encountered in real-world federated learning systems such as in healthcare or IoT applications. +3. Feature scaling is unnecessary when training XGBoost models. This not only facilitates fine-tuning on new data distributions, but also supports cross-device and cross-silo federated learning, where the data distributions from participating clients are not know a priori. + +### XGBoost in Flower + +In Flower, we have provided two strategies for performing federated learning with XGBoost: [`FedXgbBagging`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedxgb_bagging.py) and [`FedXgbCyclic`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedxgb_cyclic.py), which are inspired from the work at Nvidia NVFlare. These implementations allow Flower users to use different aggregation strategies for the XGBoost model. `FedXgbBagging` aggregates trees from all participating clients every round, whereas `FedXgbCyclic` aggregates clients' trees sequentially in a round-robin manner. With these strategies, Flower users can very quickly and easily run and compare the performance of federated learning systems on distributed tabular datasets using state-of-the-art XGBoost aggregation strategies, without needing to implement them from scratch. diff --git a/pyproject.toml b/pyproject.toml index 0d0138a5689b..536d0ddd20c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr" -version = "1.11.0" +version = "1.12.0" description = "Flower: A Friendly Federated Learning Framework" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -30,7 +30,6 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -52,20 +51,24 @@ exclude = [ ] [tool.poetry.scripts] +# `flwr` CLI flwr = "flwr.cli.app:app" -flower-superlink = "flwr.server:run_superlink" -flower-superexec = "flwr.superexec:run_superexec" -flower-supernode = "flwr.client:run_supernode" -flower-client-app = "flwr.client:run_client_app" -flower-server-app = "flwr.server:run_server_app" +# SuperExec (can run with either Deployment Engine or Simulation Engine) +flower-superexec = "flwr.superexec.app:run_superexec" +# Simulation Engine flower-simulation = "flwr.simulation.run_simulation:run_simulation_from_cli" +# Deployment Engine +flower-superlink = "flwr.server.app:run_superlink" +flower-supernode = "flwr.client.supernode.app:run_supernode" +flower-server-app = "flwr.server.run_serverapp:run_server_app" flwr-clientapp = "flwr.client.clientapp:flwr_clientapp" +flower-client-app = "flwr.client.supernode:run_client_app" # Deprecated [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" # Mandatory dependencies numpy = "^1.21.0" -grpcio = "^1.60.0,!=1.64.2,!=1.65.1,!=1.65.2,!=1.65.4" +grpcio = "^1.60.0,!=1.64.2,!=1.65.1,!=1.65.2,!=1.65.4,!=1.65.5,!=1.66.0,!=1.66.1" protobuf = "^4.25.2" cryptography = "^42.0.4" pycryptodome = "^3.18.0" @@ -75,7 +78,7 @@ tomli = "^2.0.1" tomli-w = "^1.0.0" pathspec = "^0.12.1" # Optional dependencies (Simulation Engine) -ray = { version = "==2.10.0", optional = true, python = ">=3.8,<3.12" } +ray = { version = "==2.10.0", optional = true, python 
= ">=3.9,<3.12" } # Optional dependencies (REST transport layer) requests = { version = "^2.31.0", optional = true } starlette = { version = "^0.31.0", optional = true } @@ -97,6 +100,7 @@ docformatter = "==1.7.5" mypy = "==1.8.0" pylint = "==3.0.3" flake8 = "==5.0.4" +parameterized = "==0.9.0" pytest = "==7.4.4" pytest-cov = "==4.1.0" pytest-watcher = "==0.4.1" @@ -132,6 +136,7 @@ PyGithub = "==2.1.1" licensecheck = "==2024" pre-commit = "==3.5.0" sphinx-substitution-extensions = "2022.02.16" +sphinxext-opengraph = "==0.9.1" [tool.isort] profile = "black" @@ -139,7 +144,7 @@ known_first_party = ["flwr", "flwr_tool"] [tool.black] line-length = 88 -target-version = ["py38", "py39", "py310", "py311"] +target-version = ["py39", "py310", "py311"] [tool.pylint."MESSAGES CONTROL"] disable = "duplicate-code,too-few-public-methods,useless-import-alias" @@ -188,7 +193,7 @@ wrap-summaries = 88 wrap-descriptions = 88 [tool.ruff] -target-version = "py38" +target-version = "py39" line-length = 88 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] diff --git a/src/docker/base/README.md b/src/docker/base/README.md new file mode 100644 index 000000000000..b17c3d6e5c6f --- /dev/null +++ b/src/docker/base/README.md @@ -0,0 +1,50 @@ +# Flower Base + +

+ Flower Website

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.12.0.dev20240830` + - uses Python 3.11 and Ubuntu 22.04 +- `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0-py3.11-alpine3.19` +- `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0-py3.11-alpine3.19` +- `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` +- `1.8.0-py3.11-alpine3.19` +- `1.8.0-py3.11-ubuntu22.04` +- `1.8.0-py3.10-ubuntu22.04` +- `1.8.0-py3.9-ubuntu22.04` +- `1.8.0-py3.8-ubuntu22.04` diff --git a/src/docker/base/alpine/Dockerfile b/src/docker/base/alpine/Dockerfile index 441e0fdd9b85..ee1e11b2d070 100644 --- a/src/docker/base/alpine/Dockerfile +++ b/src/docker/base/alpine/Dockerfile @@ -33,6 +33,8 @@ RUN apk add --no-cache \ # require for compiling grpcio on ARM64 g++ \ libffi-dev \ + # required for installing flwr via git + git \ # create virtual env && python -m venv /python/venv @@ -42,18 +44,27 @@ ENV PATH=/python/venv/bin:$PATH # Install specific version of pip, setuptools and flwr ARG PIP_VERSION ARG SETUPTOOLS_VERSION -ARG FLWR_VERSION -ARG FLWR_PACKAGE=flwr RUN pip install -U --no-cache-dir \ pip==${PIP_VERSION} \ - setuptools==${SETUPTOOLS_VERSION} \ - ${FLWR_PACKAGE}==${FLWR_VERSION} + setuptools==${SETUPTOOLS_VERSION} + +ARG FLWR_VERSION +ARG FLWR_VERSION_REF +ARG FLWR_PACKAGE=flwr +# hadolint ignore=DL3013 +RUN if [ -z "${FLWR_VERSION_REF}" ]; then \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}==${FLWR_VERSION}; \ + else \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}@${FLWR_VERSION_REF}; \ + fi FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} AS base -# Upgrade system Python pip and setuptools -# hadolint ignore=DL3013 -RUN pip install -U --no-cache-dir pip setuptools +# Keep the version of system Python pip and setuptools in sync with those installed in the +# virtualenv. +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +RUN pip install -U --no-cache-dir pip==${PIP_VERSION} setuptools==${SETUPTOOLS_VERSION} # required by the grpc package RUN apk add --no-cache \ diff --git a/src/docker/base/ubuntu/Dockerfile b/src/docker/base/ubuntu/Dockerfile index 31cc8381b7c5..47655b1a52a1 100644 --- a/src/docker/base/ubuntu/Dockerfile +++ b/src/docker/base/ubuntu/Dockerfile @@ -32,7 +32,7 @@ RUN apt-get update \ # Install PyEnv and Python ARG PYTHON_VERSION=3.11 ENV PYENV_ROOT=/root/.pyenv -ENV PATH $PYENV_ROOT/bin:$PATH +ENV PATH=$PYENV_ROOT/bin:$PATH # https://github.com/hadolint/hadolint/wiki/DL4006 SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash @@ -50,22 +50,29 @@ RUN LATEST=$(pyenv latest -k ${PYTHON_VERSION}) \ ENV PATH=/usr/local/bin/python/bin:$PATH -# Upgrade system Python pip and setuptools -# hadolint ignore=DL3013 -RUN pip install -U --no-cache-dir pip setuptools \ +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +# Keep the version of system Python pip and setuptools in sync with those installed in the +# virtualenv. 
+RUN pip install -U --no-cache-dir pip==${PIP_VERSION} setuptools==${SETUPTOOLS_VERSION} \ # Use a virtual environment to ensure that Python packages are installed in the same location # regardless of whether the subsequent image build is run with the app or the root user && python -m venv /python/venv ENV PATH=/python/venv/bin:$PATH -ARG PIP_VERSION -ARG SETUPTOOLS_VERSION -ARG FLWR_VERSION -ARG FLWR_PACKAGE=flwr RUN pip install -U --no-cache-dir \ pip==${PIP_VERSION} \ - setuptools==${SETUPTOOLS_VERSION} \ - ${FLWR_PACKAGE}==${FLWR_VERSION} + setuptools==${SETUPTOOLS_VERSION} + +ARG FLWR_VERSION +ARG FLWR_VERSION_REF +ARG FLWR_PACKAGE=flwr +# hadolint ignore=DL3013 +RUN if [ -z "${FLWR_VERSION_REF}" ]; then \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}==${FLWR_VERSION}; \ + else \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}@${FLWR_VERSION_REF}; \ + fi FROM $DISTRO:$DISTRO_VERSION AS base diff --git a/src/docker/clientapp/README.md b/src/docker/clientapp/README.md new file mode 100644 index 000000000000..c7975ccd762c --- /dev/null +++ b/src/docker/clientapp/README.md @@ -0,0 +1,33 @@ +# Flower ClientApp + +

+ Flower Website

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.12.0.dev20240830` + - uses Python 3.11 and Ubuntu 22.04 +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` diff --git a/src/docker/complete/compose.yml b/src/docker/complete/compose.yml index 90261249f322..e1dc2f5ffc56 100644 --- a/src/docker/complete/compose.yml +++ b/src/docker/complete/compose.yml @@ -1,17 +1,24 @@ services: # create a SuperLink service superlink: - image: flwr/superlink:${FLWR_VERSION:-1.10.0} + image: flwr/superlink:${FLWR_VERSION:-1.11.1} command: - --insecure # create a SuperExec service superexec: - user: root build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/superexec:${FLWR_VERSION:-1.10.0} + FROM flwr/superexec:${FLWR_VERSION:-1.11.1} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app WORKDIR /app COPY --chown=app:app pyproject.toml . @@ -29,89 +36,146 @@ services: - superlink="superlink:9091" depends_on: - superlink - volumes: - - apps-volume:/app/.flwr/apps/:rw # create a two SuperNode service with different node configs supernode-1: - user: root - deploy: - resources: - limits: - cpus: "2" + image: flwr/supernode:${FLWR_VERSION:-1.11.1} command: + - --insecure - --superlink - superlink:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" + depends_on: + - superlink + + supernode-2: + image: flwr/supernode:${FLWR_VERSION:-1.11.1} + command: - --insecure + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" depends_on: - superlink - volumes: - - apps-volume:/app/.flwr/apps/:ro + + # uncomment to add another SuperNode + # + # supernode-3: + # image: flwr/supernode:${FLWR_VERSION:-1.11.1} + # command: + # - --insecure + # - --superlink + # - superlink:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" + # depends_on: + # - superlink + + clientapp-1: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/supernode:${FLWR_VERSION:-1.10.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.11.1} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app WORKDIR /app COPY --chown=app:app pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . 
- ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=0,num-partitions=2"] - - supernode-2: - user: root + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-1:9094 deploy: resources: limits: cpus: "2" - command: - - --superlink - - superlink:9092 - - --insecure + stop_signal: SIGINT depends_on: - - superlink - volumes: - - apps-volume:/app/.flwr/apps/:ro + - supernode-1 + + clientapp-2: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/supernode:${FLWR_VERSION:-1.10.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.11.1} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app WORKDIR /app COPY --chown=app:app pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=1,num-partitions=2"] + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-2:9095 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-2 - # uncomment to add another supernode + # uncomment to add another ClientApp # - # supernode-3: - # user: root - # deploy: - # resources: - # limits: - # cpus: "2" - # command: - # - --superlink - # - superlink:9092 - # - --insecure - # depends_on: - # - superlink - # volumes: - # - apps-volume:/app/.flwr/apps/:ro + # clientapp-3: # build: # context: ${PROJECT_DIR:-.} # dockerfile_inline: | - # FROM flwr/supernode:${FLWR_VERSION:-1.10.0} + # FROM flwr/clientapp:${FLWR_VERSION:-1.11.1} + + # # gcc is required for the fastai quickstart example + # USER root + # RUN apt-get update \ + # && apt-get -y --no-install-recommends install \ + # build-essential \ + # && rm -rf /var/lib/apt/lists/* + # USER app # WORKDIR /app # COPY --chown=app:app pyproject.toml . # RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ # && python -m pip install -U --no-cache-dir . 
- # ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=0,num-partitions=2"] - -volumes: - apps-volume: + # ENTRYPOINT ["flwr-clientapp"] + # command: + # - --supernode + # - supernode-3:9096 + # deploy: + # resources: + # limits: + # cpus: "2" + # stop_signal: SIGINT + # depends_on: + # - supernode-3 diff --git a/src/docker/complete/with-tls.yml b/src/docker/complete/with-tls.yml index 1b8540e09b64..6cbeb2ba7397 100644 --- a/src/docker/complete/with-tls.yml +++ b/src/docker/complete/with-tls.yml @@ -17,7 +17,7 @@ services: - --executor - flwr.superexec.deployment:executor - --executor-config - - superlink="superlink:9091",root-certificates="certificates/superlink-ca.crt" + - superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" - --ssl-ca-certfile=certificates/ca.crt - --ssl-certfile=certificates/server.pem - --ssl-keyfile=certificates/server.key @@ -35,6 +35,12 @@ services: command: - --superlink - superlink:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" - --root-certificates - certificates/ca.crt secrets: @@ -45,18 +51,30 @@ services: command: - --superlink - superlink:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" - --root-certificates - certificates/ca.crt secrets: - source: superlink-ca-certfile target: /app/certificates/ca.crt - # uncomment to enable TLS on another supernode + # uncomment to enable TLS on another SuperNode # # supernode-3: # command: # - --superlink # - superlink:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" # - --root-certificates # - certificates/ca.crt # secrets: diff --git a/src/docker/distributed/.gitignore b/src/docker/distributed/.gitignore new file mode 100644 index 000000000000..1a11330c6e95 --- /dev/null +++ b/src/docker/distributed/.gitignore @@ -0,0 +1,3 @@ +superexec-certificates +superlink-certificates +server/state diff --git a/src/docker/distributed/certs.yml b/src/docker/distributed/certs.yml new file mode 100644 index 000000000000..48e157582e40 --- /dev/null +++ b/src/docker/distributed/certs.yml @@ -0,0 +1,6 @@ +services: + gen-certs: + build: + args: + SUPERLINK_IP: ${SUPERLINK_IP:-127.0.0.1} + SUPEREXEC_IP: ${SUPEREXEC_IP:-127.0.0.1} diff --git a/src/docker/distributed/client/compose.yml b/src/docker/distributed/client/compose.yml new file mode 100644 index 000000000000..60a7dcdf9b61 --- /dev/null +++ b/src/docker/distributed/client/compose.yml @@ -0,0 +1,128 @@ +services: + supernode-1: + image: flwr/supernode:${FLWR_VERSION:-1.11.1} + command: + - --superlink + - ${SUPERLINK_IP:-127.0.0.1}:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + supernode-2: + image: flwr/supernode:${FLWR_VERSION:-1.11.1} + command: + - --superlink + - ${SUPERLINK_IP:-127.0.0.1}:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + # uncomment to add another SuperNode + # + # supernode-3: + # image: flwr/supernode:${FLWR_VERSION:-1.11.1} + # command: + # - 
--superlink + # - ${SUPERLINK_IP:-127.0.0.1}:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" + # - --root-certificates + # - certificates/ca.crt + # secrets: + # - source: superlink-ca-certfile + # target: /app/certificates/ca.crt + + clientapp-1: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.11.1} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-1:9094 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-1 + + clientapp-2: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.11.1} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-2:9095 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-2 + + # uncomment to add another ClientApp + # + # clientapp-3: + # build: + # context: ${PROJECT_DIR:-.} + # dockerfile_inline: | + # FROM flwr/clientapp:${FLWR_VERSION:-1.11.1} + + # WORKDIR /app + # COPY --chown=app:app pyproject.toml . + # RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + # && python -m pip install -U --no-cache-dir . + + # ENTRYPOINT ["flwr-clientapp"] + # command: + # - --supernode + # - supernode-3:9096 + # deploy: + # resources: + # limits: + # cpus: "2" + # stop_signal: SIGINT + # depends_on: + # - supernode-3 + +secrets: + superlink-ca-certfile: + file: ../superlink-certificates/ca.crt diff --git a/src/docker/distributed/server/compose.yml b/src/docker/distributed/server/compose.yml new file mode 100644 index 000000000000..54e9faf14b84 --- /dev/null +++ b/src/docker/distributed/server/compose.yml @@ -0,0 +1,67 @@ +services: + superlink: + image: flwr/superlink:${FLWR_VERSION:-1.11.1} + command: + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + - --database=state/state.db + volumes: + - ./state/:/app/state/:rw + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + - source: superlink-certfile + target: /app/certificates/server.pem + - source: superlink-keyfile + target: /app/certificates/server.key + ports: + - 9092:9092 + + superexec: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/superexec:${FLWR_VERSION:-1.11.1} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . 
+ + ENTRYPOINT ["flower-superexec"] + command: + - --executor + - flwr.superexec.deployment:executor + - --executor-config + - superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + secrets: + - source: superlink-ca-certfile + target: /app/certificates/superlink-ca.crt + - source: superexec-ca-certfile + target: /app/certificates/ca.crt + - source: superexec-certfile + target: /app/certificates/server.pem + - source: superexec-keyfile + target: /app/certificates/server.key + ports: + - 9093:9093 + depends_on: + - superlink + +secrets: + superlink-ca-certfile: + file: ../superlink-certificates/ca.crt + superlink-certfile: + file: ../superlink-certificates/server.pem + superlink-keyfile: + file: ../superlink-certificates/server.key + superexec-ca-certfile: + file: ../superexec-certificates/ca.crt + superexec-certfile: + file: ../superexec-certificates/server.pem + superexec-keyfile: + file: ../superexec-certificates/server.key diff --git a/src/docker/serverapp/README.md b/src/docker/serverapp/README.md new file mode 100644 index 000000000000..da49eb3596b9 --- /dev/null +++ b/src/docker/serverapp/README.md @@ -0,0 +1,45 @@ +# Flower ServerApp + +

+ Flower Website

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.12.0.dev20240830` + - uses Python 3.11 and Ubuntu 22.04 +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` +- `1.8.0`, `1.8.0-py3.11-ubuntu22.04` +- `1.8.0-py3.10-ubuntu22.04` +- `1.8.0-py3.9-ubuntu22.04` +- `1.8.0-py3.8-ubuntu22.04` diff --git a/src/docker/superexec/README.md b/src/docker/superexec/README.md new file mode 100644 index 000000000000..ed44f24ca7ae --- /dev/null +++ b/src/docker/superexec/README.md @@ -0,0 +1,37 @@ +# Flower SuperExec + +

+ <a href="https://flower.ai">Flower Website</a>

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.12.0.dev20240830` + - uses Python 3.11 and Ubuntu 22.04 +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` diff --git a/src/docker/superlink/README.md b/src/docker/superlink/README.md new file mode 100644 index 000000000000..0e20bf3d039f --- /dev/null +++ b/src/docker/superlink/README.md @@ -0,0 +1,35 @@ +# Flower SuperLink + +

+ <a href="https://flower.ai">Flower Website</a>

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.12.0.dev20240830` + - uses Python 3.11 and Ubuntu 22.04 +- `1.11.1`, `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-alpine3.19` +- `1.10.0-py3.11-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-alpine3.19` +- `1.9.0-py3.11-ubuntu22.04` +- `1.8.0`, `1.8.0-py3.11-alpine3.19` +- `1.8.0-py3.11-ubuntu22.04` diff --git a/src/docker/supernode/README.md b/src/docker/supernode/README.md new file mode 100644 index 000000000000..c2d99a500da4 --- /dev/null +++ b/src/docker/supernode/README.md @@ -0,0 +1,43 @@ +# Flower SuperNode + +

+ <a href="https://flower.ai">Flower Website</a>

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.12.0.dev20240830` + - uses Python 3.11 and Ubuntu 22.04 +- `1.11.1`, `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` diff --git a/src/proto/flwr/proto/clientappio.proto b/src/proto/flwr/proto/clientappio.proto index 898cb04c5b5b..19d2db50501a 100644 --- a/src/proto/flwr/proto/clientappio.proto +++ b/src/proto/flwr/proto/clientappio.proto @@ -1,3 +1,18 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + syntax = "proto3"; package flwr.proto; @@ -30,9 +45,9 @@ message ClientAppOutputStatus { } message GetTokenRequest {} -message GetTokenResponse { sint64 token = 1; } +message GetTokenResponse { uint64 token = 1; } -message PullClientAppInputsRequest { sint64 token = 1; } +message PullClientAppInputsRequest { uint64 token = 1; } message PullClientAppInputsResponse { Message message = 1; Context context = 2; @@ -41,7 +56,7 @@ message PullClientAppInputsResponse { } message PushClientAppOutputsRequest { - sint64 token = 1; + uint64 token = 1; Message message = 2; Context context = 3; } diff --git a/src/proto/flwr/proto/control.proto b/src/proto/flwr/proto/control.proto new file mode 100644 index 000000000000..8b75c66fccaa --- /dev/null +++ b/src/proto/flwr/proto/control.proto @@ -0,0 +1,32 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/run.proto"; + +service Control { + // Request to create a new run + rpc CreateRun(CreateRunRequest) returns (CreateRunResponse) {} + + // Get the status of a given run + rpc GetRunStatus(GetRunStatusRequest) returns (GetRunStatusResponse) {} + + // Update the status of a given run + rpc UpdateRunStatus(UpdateRunStatusRequest) + returns (UpdateRunStatusResponse) {} +} diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/driver.proto index 63a2f78e6f6d..e26003862a76 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/driver.proto @@ -21,7 +21,6 @@ import "flwr/proto/node.proto"; import "flwr/proto/task.proto"; import "flwr/proto/run.proto"; import "flwr/proto/fab.proto"; -import "flwr/proto/transport.proto"; service Driver { // Request run_id @@ -43,17 +42,8 @@ service Driver { rpc GetFab(GetFabRequest) returns (GetFabResponse) {} } -// CreateRun -message CreateRunRequest { - string fab_id = 1; - string fab_version = 2; - map override_config = 3; - Fab fab = 4; -} -message CreateRunResponse { sint64 run_id = 1; } - // GetNodes messages -message GetNodesRequest { sint64 run_id = 1; } +message GetNodesRequest { uint64 run_id = 1; } message GetNodesResponse { repeated Node nodes = 1; } // PushTaskIns messages diff --git a/src/proto/flwr/proto/exec.proto b/src/proto/flwr/proto/exec.proto index 65faf4386ea0..ad0723c0480c 100644 --- a/src/proto/flwr/proto/exec.proto +++ b/src/proto/flwr/proto/exec.proto @@ -33,6 +33,6 @@ message StartRunRequest { map override_config = 2; map federation_config = 3; } -message StartRunResponse { sint64 run_id = 1; } -message StreamLogsRequest { sint64 run_id = 1; } +message StartRunResponse { uint64 run_id = 1; } +message StreamLogsRequest { uint64 run_id = 1; } message StreamLogsResponse { string log_output = 1; } diff --git a/src/proto/flwr/proto/fab.proto b/src/proto/flwr/proto/fab.proto index 3620a95ff009..6f8e6b87808d 100644 --- a/src/proto/flwr/proto/fab.proto +++ b/src/proto/flwr/proto/fab.proto @@ -1,3 +1,18 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== + syntax = "proto3"; package flwr.proto; diff --git a/src/proto/flwr/proto/message.proto b/src/proto/flwr/proto/message.proto index 3230ab0609a9..7066da5b7e76 100644 --- a/src/proto/flwr/proto/message.proto +++ b/src/proto/flwr/proto/message.proto @@ -28,17 +28,17 @@ message Message { } message Context { - sint64 node_id = 1; + uint64 node_id = 1; map node_config = 2; RecordSet state = 3; map run_config = 4; } message Metadata { - sint64 run_id = 1; + uint64 run_id = 1; string message_id = 2; - sint64 src_node_id = 3; - sint64 dst_node_id = 4; + uint64 src_node_id = 3; + uint64 dst_node_id = 4; string reply_to_message = 5; string group_id = 6; double ttl = 7; diff --git a/src/proto/flwr/proto/node.proto b/src/proto/flwr/proto/node.proto index e61d44f0f783..ec72b51b44ec 100644 --- a/src/proto/flwr/proto/node.proto +++ b/src/proto/flwr/proto/node.proto @@ -18,6 +18,6 @@ syntax = "proto3"; package flwr.proto; message Node { - sint64 node_id = 1; + uint64 node_id = 1; bool anonymous = 2; } diff --git a/src/proto/flwr/proto/run.proto b/src/proto/flwr/proto/run.proto index 6adca5c2437b..2c9bd877f66c 100644 --- a/src/proto/flwr/proto/run.proto +++ b/src/proto/flwr/proto/run.proto @@ -17,14 +17,46 @@ syntax = "proto3"; package flwr.proto; +import "flwr/proto/fab.proto"; import "flwr/proto/transport.proto"; message Run { - sint64 run_id = 1; + uint64 run_id = 1; string fab_id = 2; string fab_version = 3; map override_config = 4; string fab_hash = 5; } -message GetRunRequest { sint64 run_id = 1; } + +message RunStatus { + // "starting", "running", "finished" + string status = 1; + // "completed", "failed", "stopped" or "" (non-finished) + string sub_status = 2; + // failure details + string details = 3; +} + +// CreateRun +message CreateRunRequest { + string fab_id = 1; + string fab_version = 2; + map override_config = 3; + Fab fab = 4; +} +message CreateRunResponse { uint64 run_id = 1; } + +// GetRun +message GetRunRequest { uint64 run_id = 1; } message GetRunResponse { Run run = 1; } + +// UpdateRunStatus +message UpdateRunStatusRequest { + uint64 run_id = 1; + RunStatus run_status = 2; +} +message UpdateRunStatusResponse {} + +// GetRunStatus +message GetRunStatusRequest { repeated uint64 run_ids = 1; } +message GetRunStatusResponse { map run_status_dict = 1; } diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 936b8120e495..324a70a5359c 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -37,13 +37,13 @@ message Task { message TaskIns { string task_id = 1; string group_id = 2; - sint64 run_id = 3; + uint64 run_id = 3; Task task = 4; } message TaskRes { string task_id = 1; string group_id = 2; - sint64 run_id = 3; + uint64 run_id = 3; Task task = 4; } diff --git a/src/py/flwr/cli/app.py b/src/py/flwr/cli/app.py index 93effea6df98..8baccb4638fc 100644 --- a/src/py/flwr/cli/app.py +++ b/src/py/flwr/cli/app.py @@ -19,6 +19,7 @@ from .build import build from .install import install +from .log import log from .new import new from .run import run @@ -35,6 +36,7 @@ app.command()(run) app.command()(build) app.command()(install) +app.command()(log) typer_click_object = get_command(app) diff --git a/src/py/flwr/cli/build.py b/src/py/flwr/cli/build.py index 676bc1723568..137e2dc31aff 100644 --- a/src/py/flwr/cli/build.py +++ b/src/py/flwr/cli/build.py @@ -17,12 +17,11 @@ import os import zipfile from pathlib import Path -from typing import Optional +from 
typing import Annotated, Optional import pathspec import tomli_w import typer -from typing_extensions import Annotated from .config_utils import load_and_validate from .utils import get_sha256_hash, is_valid_project_name diff --git a/src/py/flwr/cli/config_utils.py b/src/py/flwr/cli/config_utils.py index 233d35a5fa17..79e4973ccf9c 100644 --- a/src/py/flwr/cli/config_utils.py +++ b/src/py/flwr/cli/config_utils.py @@ -17,7 +17,7 @@ import zipfile from io import BytesIO from pathlib import Path -from typing import IO, Any, Dict, List, Optional, Tuple, Union, get_args +from typing import IO, Any, Optional, Union, get_args import tomli @@ -25,7 +25,7 @@ from flwr.common.typing import UserConfigValue -def get_fab_config(fab_file: Union[Path, bytes]) -> Dict[str, Any]: +def get_fab_config(fab_file: Union[Path, bytes]) -> dict[str, Any]: """Extract the config from a FAB file or path. Parameters @@ -62,7 +62,7 @@ def get_fab_config(fab_file: Union[Path, bytes]) -> Dict[str, Any]: return conf -def get_fab_metadata(fab_file: Union[Path, bytes]) -> Tuple[str, str]: +def get_fab_metadata(fab_file: Union[Path, bytes]) -> tuple[str, str]: """Extract the fab_id and the fab_version from a FAB file or path. Parameters @@ -87,7 +87,7 @@ def get_fab_metadata(fab_file: Union[Path, bytes]) -> Tuple[str, str]: def load_and_validate( path: Optional[Path] = None, check_module: bool = True, -) -> Tuple[Optional[Dict[str, Any]], List[str], List[str]]: +) -> tuple[Optional[dict[str, Any]], list[str], list[str]]: """Load and validate pyproject.toml as dict. Returns @@ -116,7 +116,7 @@ def load_and_validate( return (config, errors, warnings) -def load(toml_path: Path) -> Optional[Dict[str, Any]]: +def load(toml_path: Path) -> Optional[dict[str, Any]]: """Load pyproject.toml and return as dict.""" if not toml_path.is_file(): return None @@ -125,7 +125,7 @@ def load(toml_path: Path) -> Optional[Dict[str, Any]]: return load_from_string(toml_file.read()) -def _validate_run_config(config_dict: Dict[str, Any], errors: List[str]) -> None: +def _validate_run_config(config_dict: dict[str, Any], errors: list[str]) -> None: for key, value in config_dict.items(): if isinstance(value, dict): _validate_run_config(config_dict[key], errors) @@ -137,7 +137,7 @@ def _validate_run_config(config_dict: Dict[str, Any], errors: List[str]) -> None # pylint: disable=too-many-branches -def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: +def validate_fields(config: dict[str, Any]) -> tuple[bool, list[str], list[str]]: """Validate pyproject.toml fields.""" errors = [] warnings = [] @@ -183,10 +183,10 @@ def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]] def validate( - config: Dict[str, Any], + config: dict[str, Any], check_module: bool = True, project_dir: Optional[Union[str, Path]] = None, -) -> Tuple[bool, List[str], List[str]]: +) -> tuple[bool, list[str], list[str]]: """Validate pyproject.toml.""" is_valid, errors, warnings = validate_fields(config) @@ -210,7 +210,7 @@ def validate( return True, [], [] -def load_from_string(toml_content: str) -> Optional[Dict[str, Any]]: +def load_from_string(toml_content: str) -> Optional[dict[str, Any]]: """Load TOML content from a string and return as dict.""" try: data = tomli.loads(toml_content) diff --git a/src/py/flwr/cli/config_utils_test.py b/src/py/flwr/cli/config_utils_test.py index cad6714521e3..ddabc152bc0f 100644 --- a/src/py/flwr/cli/config_utils_test.py +++ b/src/py/flwr/cli/config_utils_test.py @@ -17,7 +17,7 @@ import os import 
textwrap from pathlib import Path -from typing import Any, Dict +from typing import Any from .config_utils import load, validate, validate_fields @@ -155,7 +155,7 @@ def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: def test_validate_pyproject_toml_fields_empty() -> None: """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare - config: Dict[str, Any] = {} + config: dict[str, Any] = {} # Execute is_valid, errors, warnings = validate_fields(config) diff --git a/src/py/flwr/cli/install.py b/src/py/flwr/cli/install.py index 4318ccdf9ffb..8e3e9505898c 100644 --- a/src/py/flwr/cli/install.py +++ b/src/py/flwr/cli/install.py @@ -21,10 +21,9 @@ import zipfile from io import BytesIO from pathlib import Path -from typing import IO, Optional, Union +from typing import IO, Annotated, Optional, Union import typer -from typing_extensions import Annotated from flwr.common.config import get_flwr_dir diff --git a/src/py/flwr/cli/log.py b/src/py/flwr/cli/log.py new file mode 100644 index 000000000000..6915de1e00c5 --- /dev/null +++ b/src/py/flwr/cli/log.py @@ -0,0 +1,196 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower command line interface `log` command.""" + +import sys +import time +from logging import DEBUG, ERROR, INFO +from pathlib import Path +from typing import Annotated, Optional + +import grpc +import typer + +from flwr.cli.config_utils import load_and_validate +from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel +from flwr.common.logger import log as logger + +CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) + + +# pylint: disable=unused-argument +def stream_logs(run_id: int, channel: grpc.Channel, period: int) -> None: + """Stream logs from the beginning of a run with connection refresh.""" + + +# pylint: disable=unused-argument +def print_logs(run_id: int, channel: grpc.Channel, timeout: int) -> None: + """Print logs from the beginning of a run.""" + + +def on_channel_state_change(channel_connectivity: str) -> None: + """Log channel connectivity.""" + logger(DEBUG, channel_connectivity) + + +def log( + run_id: Annotated[ + int, + typer.Argument(help="The Flower run ID to query"), + ], + app: Annotated[ + Path, + typer.Argument(help="Path of the Flower project to run"), + ] = Path("."), + federation: Annotated[ + Optional[str], + typer.Argument(help="Name of the federation to run the app on"), + ] = None, + stream: Annotated[ + bool, + typer.Option( + "--stream/--show", + help="Flag to stream or print logs from the Flower run", + ), + ] = True, +) -> None: + """Get logs from a Flower project run.""" + typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) + + pyproject_path = app / "pyproject.toml" if app else None + config, errors, warnings = load_and_validate(path=pyproject_path) + + if config is None: + typer.secho( + "Project configuration could not be loaded.\n" + "pyproject.toml is invalid:\n" + + "\n".join([f"- {line}" for line in errors]), + fg=typer.colors.RED, + bold=True, + ) + sys.exit() + + if warnings: + typer.secho( + "Project configuration is missing the following " + "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), + fg=typer.colors.RED, + bold=True, + ) + + typer.secho("Success", fg=typer.colors.GREEN) + + federation = federation or config["tool"]["flwr"]["federations"].get("default") + + if federation is None: + typer.secho( + "❌ No federation name was provided and the project's `pyproject.toml` " + "doesn't declare a default federation (with a SuperExec address or an " + "`options.num-supernodes` value).", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + # Validate the federation exists in the configuration + federation_config = config["tool"]["flwr"]["federations"].get(federation) + if federation_config is None: + available_feds = { + fed for fed in config["tool"]["flwr"]["federations"] if fed != "default" + } + typer.secho( + f"❌ There is no `{federation}` federation declared in the " + "`pyproject.toml`.\n The following federations were found:\n\n" + + "\n".join(available_feds), + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if "address" not in federation_config: + typer.secho( + "❌ `flwr log` currently works with `SuperExec`. Ensure that the correct" + "`SuperExec` address is provided in the `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + _log_with_superexec(federation_config, run_id, stream) + + +# pylint: disable-next=too-many-branches +def _log_with_superexec( + federation_config: dict[str, str], + run_id: int, + stream: bool, +) -> None: + insecure_str = federation_config.get("insecure") + if root_certificates := federation_config.get("root-certificates"): + root_certificates_bytes = Path(root_certificates).read_bytes() + if insecure := bool(insecure_str): + typer.secho( + "❌ `root_certificates` were provided but the `insecure` parameter" + "is set to `True`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + else: + root_certificates_bytes = None + if insecure_str is None: + typer.secho( + "❌ To disable TLS, set `insecure = true` in `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + if not (insecure := bool(insecure_str)): + typer.secho( + "❌ No certificate were given yet `insecure` is set to `False`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + channel = create_channel( + server_address=federation_config["address"], + insecure=insecure, + root_certificates=root_certificates_bytes, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + interceptors=None, + ) + channel.subscribe(on_channel_state_change) + + if stream: + try: + while True: + logger(INFO, "Starting logstream for run_id `%s`", run_id) + stream_logs(run_id, channel, CONN_REFRESH_PERIOD) + time.sleep(2) + logger(DEBUG, "Reconnecting to logstream") + except KeyboardInterrupt: + logger(INFO, "Exiting logstream") + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() == grpc.StatusCode.NOT_FOUND: + logger(ERROR, "Invalid run_id `%s`, exiting", run_id) + if e.code() == grpc.StatusCode.CANCELLED: + pass + finally: + 
channel.close() + else: + logger(INFO, "Printing logstream for run_id `%s`", run_id) + print_logs(run_id, channel, timeout=5) diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 0dbfc3d284be..23bfaf21467a 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -18,10 +18,9 @@ from enum import Enum from pathlib import Path from string import Template -from typing import Dict, Optional +from typing import Annotated, Optional import typer -from typing_extensions import Annotated from ..utils import ( is_valid_project_name, @@ -43,6 +42,7 @@ class MlFramework(str, Enum): NUMPY = "NumPy" XGBOOST = "XGBoost" FLOWERTUNE = "FlowerTune" + BASELINE = "Flower Baseline" class LlmChallengeName(str, Enum): @@ -70,7 +70,7 @@ def load_template(name: str) -> str: return tpl_file.read() -def render_template(template: str, data: Dict[str, str]) -> str: +def render_template(template: str, data: dict[str, str]) -> str: """Render template.""" tpl_file = load_template(template) tpl = Template(tpl_file) @@ -85,7 +85,7 @@ def create_file(file_path: Path, content: str) -> None: file_path.write_text(content) -def render_and_create(file_path: Path, template: str, context: Dict[str, str]) -> None: +def render_and_create(file_path: Path, template: str, context: dict[str, str]) -> None: """Render template and write to file.""" content = render_template(template, context) create_file(file_path, content) @@ -136,34 +136,23 @@ def new( username = prompt_text("Please provide your Flower username") if framework is not None: - framework_str_upper = str(framework.value) + framework_str = str(framework.value) else: - framework_value = prompt_options( + framework_str = prompt_options( "Please select ML framework by typing in the number", [mlf.value for mlf in MlFramework], ) - selected_value = [ - name - for name, value in vars(MlFramework).items() - if value == framework_value - ] - framework_str_upper = selected_value[0] - - framework_str = framework_str_upper.lower() llm_challenge_str = None - if framework_str == "flowertune": + if framework_str == MlFramework.FLOWERTUNE: llm_challenge_value = prompt_options( "Please select LLM challenge by typing in the number", sorted([challenge.value for challenge in LlmChallengeName]), ) - selected_value = [ - name - for name, value in vars(LlmChallengeName).items() - if value == llm_challenge_value - ] - llm_challenge_str = selected_value[0] - llm_challenge_str = llm_challenge_str.lower() + llm_challenge_str = llm_challenge_value.lower() + + if framework_str == MlFramework.BASELINE: + framework_str = "baseline" print( typer.style( @@ -174,38 +163,36 @@ def new( ) context = { - "framework_str": framework_str_upper, + "framework_str": framework_str, "import_name": import_name.replace("-", "_"), "package_name": package_name, "project_name": app_name, "username": username, } + template_name = framework_str.lower() + # List of files to render if llm_challenge_str: files = { ".gitignore": {"template": "app/.gitignore.tpl"}, - "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, - "README.md": {"template": f"app/README.{framework_str}.md.tpl"}, + "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"}, + "README.md": {"template": f"app/README.{template_name}.md.tpl"}, f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, - f"{import_name}/server.py": { - "template": "app/code/flwr_tune/server.py.tpl" + f"{import_name}/server_app.py": { + "template": "app/code/flwr_tune/server_app.py.tpl" }, - 
f"{import_name}/client.py": { - "template": "app/code/flwr_tune/client.py.tpl" + f"{import_name}/client_app.py": { + "template": "app/code/flwr_tune/client_app.py.tpl" }, - f"{import_name}/app.py": {"template": "app/code/flwr_tune/app.py.tpl"}, f"{import_name}/models.py": { "template": "app/code/flwr_tune/models.py.tpl" }, f"{import_name}/dataset.py": { "template": "app/code/flwr_tune/dataset.py.tpl" }, - f"{import_name}/conf/config.yaml": { - "template": "app/code/flwr_tune/config.yaml.tpl" - }, - f"{import_name}/conf/static_config.yaml": { - "template": "app/code/flwr_tune/static_config.yaml.tpl" + f"{import_name}/strategy.py": { + "template": "app/code/flwr_tune/strategy.py.tpl" }, } @@ -237,30 +224,43 @@ def new( files = { ".gitignore": {"template": "app/.gitignore.tpl"}, "README.md": {"template": "app/README.md.tpl"}, - "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, + "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"}, f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, f"{import_name}/server_app.py": { - "template": f"app/code/server.{framework_str}.py.tpl" + "template": f"app/code/server.{template_name}.py.tpl" }, f"{import_name}/client_app.py": { - "template": f"app/code/client.{framework_str}.py.tpl" + "template": f"app/code/client.{template_name}.py.tpl" }, } # Depending on the framework, generate task.py file frameworks_with_tasks = [ - MlFramework.PYTORCH.value.lower(), - MlFramework.JAX.value.lower(), - MlFramework.HUGGINGFACE.value.lower(), - MlFramework.MLX.value.lower(), - MlFramework.TENSORFLOW.value.lower(), - MlFramework.XGBOOST.value.lower(), + MlFramework.PYTORCH.value, + MlFramework.JAX.value, + MlFramework.HUGGINGFACE.value, + MlFramework.MLX.value, + MlFramework.TENSORFLOW.value, + MlFramework.XGBOOST.value, ] if framework_str in frameworks_with_tasks: files[f"{import_name}/task.py"] = { - "template": f"app/code/task.{framework_str}.py.tpl" + "template": f"app/code/task.{template_name}.py.tpl" } + if framework_str == "baseline": + # Include additional files for baseline template + for file_name in ["model", "dataset", "strategy", "utils", "__init__"]: + files[f"{import_name}/{file_name}.py"] = { + "template": f"app/code/{file_name}.{template_name}.py.tpl" + } + + # Replace README.md + files["README.md"]["template"] = f"app/README.{template_name}.md.tpl" + + # Add LICENSE + files["LICENSE"] = {"template": "app/LICENSE.tpl"} + for file_path, value in files.items(): render_and_create( file_path=project_dir / file_path, @@ -277,7 +277,7 @@ def new( ) ) - _add = " huggingface-cli login\n" if framework_str == "flowertune" else "" + _add = " huggingface-cli login\n" if llm_challenge_str else "" print( typer.style( f" cd {package_name}\n" + " pip install -e .\n" + _add + " flwr run\n", diff --git a/baselines/baseline_template/LICENSE b/src/py/flwr/cli/new/templates/app/LICENSE.tpl similarity index 99% rename from baselines/baseline_template/LICENSE rename to src/py/flwr/cli/new/templates/app/LICENSE.tpl index d64569567334..7a4a3ea2424c 100644 --- a/baselines/baseline_template/LICENSE +++ b/src/py/flwr/cli/new/templates/app/LICENSE.tpl @@ -199,4 +199,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. + limitations under the License. 
\ No newline at end of file diff --git a/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl b/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl new file mode 100644 index 000000000000..9bbbe8f22794 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl @@ -0,0 +1,127 @@ +--- +title: title of the paper # TODO +url: https://arxiv.org/abs/2007.14390 # TODO: update with the link to your paper +labels: [label1, label2] # TODO: please add between 4 and 10 single-word (maybe two-words) labels (e.g. system heterogeneity, image classification, asynchronous, weight sharing, cross-silo). Do not use "". Remove this comment once you are done. +dataset: [dataset1, dataset2] # TODO: list of datasets you include in your baseline. Do not use "". Remove this comment once you are done. +--- + +> [!IMPORTANT] +> This is the template for your `README.md`. Please fill-in the information in all areas with a :warning: symbol. +> Please refer to the [Flower Baselines contribution](https://flower.ai/docs/baselines/how-to-contribute-baselines.html) and [Flower Baselines usage](https://flower.ai/docs/baselines/how-to-use-baselines.html) guides for more details. +> Please complete the metadata section at the very top of this README. This generates a table at the top of the file that will facilitate indexing baselines. +> Please remove this [!IMPORTANT] block once you are done with your `README.md` as well as all the `:warning:` symbols and the comments next to them. + +> [!IMPORTANT] +> To help having all baselines similarly formatted and structured, we have included two scripts in `baselines/dev` that when run will format your code and run some tests checking if it's formatted. +> These checks use standard packages such as `isort`, `black`, `pylint` and others. You as a baseline creator will need to install additional pacakges. These are already specified in the `pyproject.toml` of +> your baseline. Follow these steps: + +```bash +# Create a python env +pyenv virtualenv 3.10.14 $project_name + +# Activate it +pyenv activate $project_name + +# Install project including developer packages +# Note the `-e` this means you install it in editable mode +# so even if you change the code you don't need to do `pip install` +# again. However, if you add a new dependency to `pyproject.toml` you +# will need to re-run the command below +pip install -e ".[dev]" + +# Even without modifying or adding new code, you can run your baseline +# with the placeholder code generated when you did `flwr new`. If you +# want to test this to familiarise yourself with how flower apps are +# executed, execute this from the directory where you `pyproject.toml` is: +flwr run . + +# At anypoint during the process of creating your baseline you can +# run the formatting script. For this do: +cd .. # so you are in the `flower/baselines` directory + +# Run the formatting script (it will auto-correct issues if possible) +./dev/format-baseline.sh $project_name + +# Then, if the above is all good, run the tests. +./dev/test-baseline.sh $project_name +``` + +> [!IMPORTANT] +> When you open a PR to get the baseline merged into the main Flower repository, the `./dev/test-baseline.sh` script will run. Only if test pass, the baseline can be merged. +> Some issues highlighted by the tests script are easier than others to fix. Do not hesitate in reaching out for help to us (e.g. as a comment in your PR) if you are stuck with these. 
+> Before opening your PR, please remove the code snippet above as well all the [!IMPORTANT] message blocks. Yes, including this one. + +# :warning: *_Title of your baseline_* # Also copy this title to the `description` in the `[project]` section of your `pyproject.toml`. + +> [!NOTE] +> If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. + +**Paper:** :warning: *_add the URL of the paper page (not to the .pdf). For instance if you link a paper on ArXiv, add here the URL to the abstract page (e.g. [paper](https://arxiv.org/abs/1512.03385)). If your paper is in from a journal or conference proceedings, please follow the same logic._* + +**Authors:** :warning: *_list authors of the paper_* + +**Abstract:** :warning: *_add here the abstract of the paper you are implementing_* + + +## About this baseline + +**What’s implemented:** :warning: *_Concisely describe what experiment(s) (e.g. Figure 1, Table 2, etc) in the publication can be replicated by running the code. Please only use a few sentences. ”_* + +**Datasets:** :warning: *_List the datasets you used (if you used a medium to large dataset, >10GB please also include the sizes of the dataset). We highly recommend using [FlowerDatasets](https://flower.ai/docs/datasets/index.html) to download and partition your dataset. If you have other ways to download the data, you can also use `FlowerDatasets` to partiion it._* + +**Hardware Setup:** :warning: *_Give some details about the hardware (e.g. a server with 8x V100 32GB and 256GB of RAM) you used to run the experiments for this baseline. Indicate how long it took to run the experiments. Someone out there might not have access to the same resources you have so, could you list the absolute minimum hardware needed to run the experiment in a reasonable amount of time ? (e.g. minimum is 1x 16GB GPU otherwise a client model can’t be trained with a sufficiently large batch size). Could you test this works too?_* + +**Contributors:** :warning: *_let the world know who contributed to this baseline. This could be either your name, your name and affiliation at the time, or your GitHub profile name if you prefer. If multiple contributors signed up for this baseline, please list yourself and your colleagues_* + + +## Experimental Setup + +**Task:** :warning: *_what’s the primary task that is being federated? (e.g. image classification, next-word prediction). If you have experiments for several, please list them_* + +**Model:** :warning: *_provide details about the model you used in your experiments (if more than use a list). If your model is small, describing it as a table would be :100:. Some FL methods do not use an off-the-shelve model (e.g. ResNet18) instead they create your own. If this is your case, please provide a summary here and give pointers to where in the paper (e.g. Appendix B.4) is detailed._* + +**Dataset:** :warning: *_Earlier you listed already the datasets that your baseline uses. Now you should include a breakdown of the details about each of them. Please include information about: how the dataset is partitioned (e.g. LDA with alpha 0.1 as default and all clients have the same number of training examples; or each client gets assigned a different number of samples following a power-law distribution with each client only instances of 2 classes)? if your dataset is naturally partitioned just state “naturally partitioned”; how many partitions there are (i.e. how many clients)? 
Please include this an all information relevant about the dataset and its partitioning into a table._* + +**Training Hyperparameters:** :warning: *_Include a table with all the main hyperparameters in your baseline. Please show them with their default value._* + + +## Environment Setup + +:warning: _Specify the steps to create and activate your environment and install the baseline project. Most baselines are expected to require minimal steps as shown below. These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._ + +:warning: _The dependencies for your baseline are listed in the `pyproject.toml`, extend it with additional packages needed for your baseline._ + +:warning: _Baselines should use Python 3.10, [pyenv](https://github.com/pyenv/pyenv), and the [virtualenv](https://github.com/pyenv/pyenv-virtualenv) plugging. + +```bash +# Create the virtual environment +pyenv virtualenv 3.10.14 + +# Activate it +pyenv activate + +# Install the baseline +pip install -e . +``` + +:warning: _If your baseline requires running some script before starting an experiment, please indicate so here_. + +## Running the Experiments + +:warning: _Make sure you have adjusted the `client-resources` in the federation in `pyproject.toml` so your simulation makes the best use of the system resources available._ + +:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. Please add command followed by results for all your experiments._ + +:warning: _You might want to add more hyperparameters and settings for your baseline. You can do so by extending `[tool.flwr.app.config]` in `pyproject.toml`. In addition, you can create a new `.toml` file that can be passed with the `--run-config` command (see below an example) to override several config values **already present** in `pyproject.toml`._ +```bash +# it is likely that for one experiment you need to override some arguments. +flwr run . --run-config learning-rate=0.1,coefficient=0.123 + +# or you might want to load different `.toml` configs all together: +flwr run . --run-config .toml +``` + +:warning: _It is preferable to show a single commmand (or multilple commands if they belong to the same experiment) and then a table/plot with the expected results, instead of showing all the commands first and then all the results/plots._ +:warning: _If you present plots or other figures, please include either a Jupyter notebook showing how to create them or include a utility function that can be called after the experiments finish running._ +:warning: If you include plots or figures, save them in `.png` format and place them in a new directory named `_static` at the same level as your `README.md`. diff --git a/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl index 2b59937e4130..4bdc9c779a29 100644 --- a/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl @@ -23,10 +23,12 @@ pip install -e . ## Experimental setup -The dataset is partitioned into $num_clients shards with IID fashion serving as clients. 
-We randomly sample $fraction_fit clients to be available for each round, -and the federated fine-tuning lasts for `200` rounds. -All settings are defined in `$project_name/conf/static_config.yaml`, which is not allowed to be modified for fair competition if you plan to participated in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard). +The dataset is divided into $num_clients partitions in an IID fashion, and a partition is assigned to each ClientApp. +We randomly sample a fraction ($fraction_fit) of the total nodes to participate in each round, for a total of `200` rounds. +All settings are defined in `pyproject.toml`. + +> [!IMPORTANT] +> Please note that `[tool.flwr.app.config.static]` and `options.num-supernodes` under `[tool.flwr.federations.local-simulation]` are not allowed to be modified for fair competition if you plan to participate in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard). ## Running the challenge @@ -39,7 +41,7 @@ huggingface-cli login ``` Run the challenge with default config values. -The configs are in `$project_name/conf/config.yaml` and `$project_name/conf/static_config.yaml`, and are loaded automatically. +The configs are defined in the `[tool.flwr.app.config]` entry of `pyproject.toml`, and are loaded automatically. ```bash flwr run @@ -53,4 +55,12 @@ We use Mistral-7B model with 4-bit quantization as default. The estimated VRAM c | :--------: | :--------: | :--------: | :--------: | :--------: | | VRAM | ~25.50 GB | ~17.30 GB | ~22.80 GB | ~17.40 GB | -You can adjust the CPU/GPU resources you assign to each of the clients based on your device, which is specified with `flower.engine.simulation` in `pyproject.toml`. +You can adjust the CPU/GPU resources you assign to each of the clients based on your device, which are specified with `options.backend.clientapp-cpus` and `options.backend.clientapp-gpus` under the `[tool.flwr.federations.local-simulation]` entry in `pyproject.toml`. + + +## Model saving + +The global PEFT model checkpoints are saved every 5 rounds after aggregation on the server side by default, which can be specified with `train.save-every-round` under the `[tool.flwr.app.config]` entry in `pyproject.toml`. + +> [!NOTE] +> Please provide the last PEFT checkpoint if you plan to participate in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard).
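To make the `pyproject.toml` layout referenced in the FlowerTune README template above concrete, here is a minimal sketch of the relevant entries. Only the key names are taken from the README (`[tool.flwr.app.config]`, `train.save-every-round`, `[tool.flwr.app.config.static]`, `options.num-supernodes`, `options.backend.clientapp-cpus`, `options.backend.clientapp-gpus`); all values and the dataset identifier are illustrative placeholders, not the defaults that `flwr new` writes out.

```toml
# Illustrative sketch only; values are placeholders, not generated defaults.
[tool.flwr.app.config]
num-server-rounds = 200         # the federated fine-tuning lasts 200 rounds
train.save-every-round = 5      # save the global PEFT checkpoint every 5 rounds

[tool.flwr.app.config.static]   # fixed for fair comparison on the LLM leaderboard
dataset.name = "your-hf-dataset-id"  # placeholder Hugging Face dataset identifier

[tool.flwr.federations.local-simulation]
options.num-supernodes = 20           # placeholder; also fixed for the leaderboard
options.backend.clientapp-cpus = 6    # CPU cores assigned to each ClientApp
options.backend.clientapp-gpus = 1.0  # GPU share assigned to each ClientApp
```

Keys outside the static section can still be overridden per run via the `--run-config` mechanism shown in the baseline README earlier, leaving the leaderboard-relevant settings untouched.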
diff --git a/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl new file mode 100644 index 000000000000..83a475f20d27 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl @@ -0,0 +1,58 @@ +"""$project_name: A Flower Baseline.""" + +import torch + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from $import_name.dataset import load_data +from $import_name.model import Net, get_weights, set_weights, test, train + + +class FlowerClient(NumPyClient): + """A class defining the client.""" + + def __init__(self, net, trainloader, valloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + """Traim model using this client's data.""" + set_weights(self.net, parameters) + train_loss = train( + self.net, + self.trainloader, + self.local_epochs, + self.device, + ) + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) + + def evaluate(self, parameters, config): + """Evaluate model using this client's data.""" + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + # Load model and data + net = Net() + partition_id = int(context.node_config["partition-id"]) + num_partitions = int(context.node_config["num-partitions"]) + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl index 3041a69e3aaa..840f938b4ecc 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl @@ -1,18 +1,11 @@ """$project_name: A Flower / $framework_str app.""" +import torch from flwr.client import ClientApp, NumPyClient from flwr.common import Context from transformers import AutoModelForSequenceClassification -from $import_name.task import ( - get_weights, - load_data, - set_weights, - train, - test, - CHECKPOINT, - DEVICE, -) +from $import_name.task import get_weights, load_data, set_weights, test, train # Flower client @@ -22,37 +15,34 @@ class FlowerClient(NumPyClient): self.trainloader = trainloader self.testloader = testloader self.local_epochs = local_epochs - - def get_parameters(self, config): - return get_weights(self.net) - - def set_parameters(self, parameters): - set_weights(self.net, parameters) + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) def fit(self, parameters, 
config): - self.set_parameters(parameters) - train( - self.net, - self.trainloader, - epochs=self.local_epochs, - ) - return self.get_parameters(config={}), len(self.trainloader), {} + set_weights(self.net, parameters) + train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) + return get_weights(self.net), len(self.trainloader), {} def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(self.net, self.testloader) + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) return float(loss), len(self.testloader), {"accuracy": accuracy} def client_fn(context: Context): - # Load model and data - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) + # Get this client's dataset partition partition_id = context.node_config["partition-id"] num_partitions = context.node_config["num-partitions"] - trainloader, valloader = load_data(partition_id, num_partitions) + model_name = context.run_config["model-name"] + trainloader, valloader = load_data(partition_id, num_partitions, model_name) + + # Load model + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + local_epochs = context.run_config["local-epochs"] # Return Client instance diff --git a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl index 48ee3b4f5356..f8c148691561 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl @@ -17,9 +17,6 @@ class FlowerClient(NumPyClient): self.batch_size = batch_size self.verbose = verbose - def get_parameters(self, config): - return self.model.get_weights() - def fit(self, parameters, config): self.model.set_weights(parameters) self.model.fit( diff --git a/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl new file mode 100644 index 000000000000..46f1f64418c0 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl @@ -0,0 +1,36 @@ +"""$project_name: A Flower Baseline.""" + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + +FDS = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + """Load partition CIFAR10 data.""" + # Only initialize `FederatedDataset` once + global FDS # pylint: disable=global-statement + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = FDS.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = 
DataLoader(partition_train_test["test"], batch_size=32) + return trainloader, testloader diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl deleted file mode 100644 index 637658c5b23c..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl +++ /dev/null @@ -1,89 +0,0 @@ -"""$project_name: A Flower / FlowerTune app.""" - -import os -import warnings -from datetime import datetime - -from flwr_datasets import FederatedDataset -from hydra import compose, initialize -from hydra.utils import instantiate - -from flwr.client import ClientApp -from flwr.common import Context, ndarrays_to_parameters -from flwr.server import ServerApp, ServerAppComponents, ServerConfig - -from $import_name.client_app import gen_client_fn, get_parameters -from $import_name.dataset import get_tokenizer_and_data_collator_and_propt_formatting -from $import_name.models import get_model -from $import_name.server_app import fit_weighted_average, get_evaluate_fn, get_on_fit_config - -# Avoid warnings -warnings.filterwarnings("ignore", category=UserWarning) -os.environ["TOKENIZERS_PARALLELISM"] = "true" -os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" - -# Initialise regular config -with initialize(config_path="conf", version_base="1.1"): - cfg = compose(config_name="config") - -# Initialise static config -with initialize(config_path="conf", version_base="1.1"): - cfg_static = compose(config_name="static_config") - -cfg.train.num_rounds = cfg_static.num_rounds - -# Create output directory given current timestamp -current_time = datetime.now() -folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") -save_path = os.path.join(os.getcwd(), f"results/{folder_name}") -os.makedirs(save_path, exist_ok=True) - -# Partition dataset and get dataloaders -partitioner = instantiate(cfg_static.partitioner) -fds = FederatedDataset( - dataset=cfg_static.dataset.name, partitioners={"train": partitioner} -) -( - tokenizer, - data_collator, - formatting_prompts_func, -) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) - -# ClientApp for Flower Next -client = ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - ), -) - -# Get initial model weights -init_model = get_model(cfg.model) -init_model_parameters = get_parameters(init_model) -init_model_parameters = ndarrays_to_parameters(init_model_parameters) - -def server_fn(context: Context): - # Instantiate strategy according to config. Here we pass other arguments - # that are only defined at runtime. 
- strategy = instantiate( - cfg.strategy, - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, - initial_parameters=init_model_parameters, - evaluate_fn=get_evaluate_fn( - cfg.model, cfg.train.save_every_round, cfg_static.num_rounds, save_path - ), - ) - - config = ServerConfig(num_rounds=cfg_static.num_rounds) - - return ServerAppComponents(strategy=strategy, config=config) - - -# ServerApp for Flower Next -server = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl similarity index 52% rename from src/py/flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl rename to src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl index 2472e23ece44..415898ba117b 100644 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl @@ -1,19 +1,35 @@ """$project_name: A Flower / FlowerTune app.""" -from collections import OrderedDict -from typing import Callable, Dict, Tuple +import os +import warnings +from typing import Dict, Tuple import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import unflatten_dict +from flwr.common.typing import NDArrays, Scalar from omegaconf import DictConfig -from peft import get_peft_model_state_dict, set_peft_model_state_dict + from transformers import TrainingArguments from trl import SFTTrainer -from flwr.client import NumPyClient -from flwr.common import Context -from flwr.common.typing import NDArrays, Scalar -from $import_name.dataset import reformat -from $import_name.models import cosine_annealing, get_model +from $import_name.dataset import ( + get_tokenizer_and_data_collator_and_propt_formatting, + load_data, + replace_keys, +) +from $import_name.models import ( + cosine_annealing, + get_model, + set_parameters, + get_parameters, +) + +# Avoid warnings +os.environ["TOKENIZERS_PARALLELISM"] = "true" +os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" +warnings.filterwarnings("ignore", category=UserWarning) # pylint: disable=too-many-arguments @@ -29,7 +45,7 @@ class FlowerClient(NumPyClient): tokenizer, formatting_prompts_func, data_collator, - save_path, + num_rounds, ): # pylint: disable=too-many-arguments self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.train_cfg = train_cfg @@ -37,13 +53,12 @@ class FlowerClient(NumPyClient): self.tokenizer = tokenizer self.formatting_prompts_func = formatting_prompts_func self.data_collator = data_collator - self.save_path = save_path + self.num_rounds = num_rounds + self.trainset = trainset # instantiate model self.model = get_model(model_cfg) - self.trainset = trainset - def fit( self, parameters: NDArrays, config: Dict[str, Scalar] ) -> Tuple[NDArrays, int, Dict]: @@ -52,13 +67,13 @@ class FlowerClient(NumPyClient): new_lr = cosine_annealing( int(config["current_round"]), - self.train_cfg.num_rounds, + self.num_rounds, self.train_cfg.learning_rate_max, self.train_cfg.learning_rate_min, ) self.training_argumnets.learning_rate = new_lr - self.training_argumnets.output_dir = self.save_path + self.training_argumnets.output_dir = config["save_path"] # Construct trainer trainer = SFTTrainer( @@ -81,46 +96,31 @@ class FlowerClient(NumPyClient): ) -def set_parameters(model, parameters: NDArrays) -> None: - """Change the parameters of the model using the given ones.""" - 
peft_state_dict_keys = get_peft_model_state_dict(model).keys() - params_dict = zip(peft_state_dict_keys, parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - set_peft_model_state_dict(model, state_dict) - - -def get_parameters(model) -> NDArrays: - """Return the parameters of the current net.""" - state_dict = get_peft_model_state_dict(model) - return [val.cpu().numpy() for _, val in state_dict.items()] - - -def gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - model_cfg: DictConfig, - train_cfg: DictConfig, - save_path: str, -) -> Callable[[Context], FlowerClient]: # pylint: disable=too-many-arguments - """Generate the client function that creates the Flower Clients.""" - - def client_fn(context: Context) -> FlowerClient: - """Create a Flower client representing a single organization.""" - # Let's get the partition corresponding to the i-th client - partition_id = context.node_config["partition-id"] - client_trainset = fds.load_partition(partition_id, "train") - client_trainset = reformat(client_trainset, llm_task="$llm_challenge_str") - - return FlowerClient( - model_cfg, - train_cfg, - client_trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ).to_client() - - return client_fn +def client_fn(context: Context) -> FlowerClient: + """Create a Flower client representing a single organization.""" + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Let's get the client partition + client_trainset = load_data(partition_id, num_partitions, cfg.static.dataset.name) + ( + tokenizer, + data_collator, + formatting_prompts_func, + ) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) + + return FlowerClient( + cfg.model, + cfg.train, + client_trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl deleted file mode 100644 index 9f700dd5b8da..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl +++ /dev/null @@ -1,34 +0,0 @@ -# Federated Instruction Tuning ---- -model: - name: "mistralai/Mistral-7B-v0.3" - quantization: 4 # 8 or 4 if you want to do quantization with BitsAndBytes - gradient_checkpointing: True - lora: - peft_lora_r: 32 - peft_lora_alpha: 64 - -train: - num_rounds: null - save_every_round: 5 - learning_rate_max: 5e-5 - learning_rate_min: 1e-6 - seq_length: 512 - training_arguments: - output_dir: null # to be set by hydra - learning_rate: null # to be set by the client - per_device_train_batch_size: 16 - gradient_accumulation_steps: 1 - logging_steps: 10 - num_train_epochs: 3 - max_steps: 10 - report_to: null - save_steps: 1000 - save_total_limit: 10 - gradient_checkpointing: True - lr_scheduler_type: "constant" - -strategy: - _target_: flwr.server.strategy.FedAvg - fraction_fit: $fraction_fit - fraction_evaluate: 0.0 # no client evaluation diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl index 1b3691d7cf3c..41381ef7c7a3 100644 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl +++ 
b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl @@ -1,8 +1,12 @@ """$project_name: A Flower / FlowerTune app.""" +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM +FDS = None # Cache FederatedDataset + def formatting_prompts_func(example): """Construct prompts.""" @@ -24,7 +28,6 @@ def formatting_prompts_func(example): def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str): """Get tokenizer, data_collator and prompt formatting.""" - # From: https://huggingface.co/docs/trl/en/sft_trainer tokenizer = AutoTokenizer.from_pretrained( model_name, use_fast=True, padding_side="right" ) @@ -49,9 +52,36 @@ def formatting(dataset): def reformat(dataset, llm_task): """Reformat datasets.""" dataset = dataset.rename_column("output", "response") - if llm_task == "finance" or llm_task == "code": + if llm_task in ["finance", "code"]: dataset = dataset.map(formatting, remove_columns=["input"]) if llm_task == "medical": dataset = dataset.remove_columns(["instruction"]) dataset = dataset.rename_column("input", "instruction") return dataset + + +def load_data(partition_id: int, num_partitions: int, dataset_name: str): + """Load partition data.""" + # Only initialize `FederatedDataset` once + global FDS + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset=dataset_name, + partitioners={"train": partitioner}, + ) + client_trainset = FDS.load_partition(partition_id, "train") + client_trainset = reformat(client_trainset, llm_task="generalnlp") + return client_trainset + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl index a2794f35518c..3f3f95c8b8eb 100644 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl @@ -4,10 +4,18 @@ import math import torch from omegaconf import DictConfig -from peft import LoraConfig, get_peft_model +from collections import OrderedDict +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + set_peft_model_state_dict, +) from peft.utils import prepare_model_for_kbit_training from transformers import AutoModelForCausalLM, BitsAndBytesConfig +from flwr.common.typing import NDArrays + def cosine_annealing( current_round: int, @@ -22,9 +30,6 @@ def cosine_annealing( def get_model(model_cfg: DictConfig): """Load model with appropriate quantization config and other optimizations. 
- - Please refer to this example for `peft + BitsAndBytes`: - https://github.com/huggingface/peft/blob/main/examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py """ if model_cfg.quantization == 4: quantization_config = BitsAndBytesConfig(load_in_4bit=True) @@ -57,3 +62,17 @@ def get_model(model_cfg: DictConfig): model.config.use_cache = False return get_peft_model(model, peft_config) + + +def set_parameters(model, parameters: NDArrays) -> None: + """Change the parameters of the model using the given ones.""" + peft_state_dict_keys = get_peft_model_state_dict(model).keys() + params_dict = zip(peft_state_dict_keys, parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + set_peft_model_state_dict(model, state_dict) + + +def get_parameters(model) -> NDArrays: + """Return the parameters of the current net.""" + state_dict = get_peft_model_state_dict(model) + return [val.cpu().numpy() for _, val in state_dict.items()] diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl deleted file mode 100644 index 5dd4d881f2f1..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl +++ /dev/null @@ -1,48 +0,0 @@ -"""$project_name: A Flower / FlowerTune app.""" - -from $import_name.client_app import set_parameters -from $import_name.models import get_model - - -# Get function that will be executed by the strategy's evaluate() method -# Here we use it to save global model checkpoints -def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): - """Return an evaluation function for saving global model.""" - - def evaluate(server_round: int, parameters, config): - # Save model - if server_round != 0 and ( - server_round == total_round or server_round % save_every_round == 0 - ): - # Init model - model = get_model(model_cfg) - set_parameters(model, parameters) - - model.save_pretrained(f"{save_path}/peft_{server_round}") - - return 0.0, {} - - return evaluate - - -def get_on_fit_config(): - """ - Return a function that will be used to construct the config - that the client's fit() method will receive. 
- """ - - def fit_config_fn(server_round: int): - fit_config = {"current_round": server_round} - return fit_config - - return fit_config_fn - - -def fit_weighted_average(metrics): - """Aggregate (federated) evaluation metrics.""" - # Multiply accuracy of each client by number of examples used - losses = [num_examples * m["train_loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"train_loss": sum(losses) / sum(examples)} diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl new file mode 100644 index 000000000000..7d4de0f73dbf --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl @@ -0,0 +1,94 @@ +"""$project_name: A Flower / FlowerTune app.""" + +import os +from datetime import datetime + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.config import unflatten_dict +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from omegaconf import DictConfig + +from $import_name.models import get_model, get_parameters, set_parameters +from $import_name.dataset import replace_keys +from $import_name.strategy import FlowerTuneLlm + + +# Get function that will be executed by the strategy's evaluate() method +# Here we use it to save global model checkpoints +def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): + """Return an evaluation function for saving global model.""" + + def evaluate(server_round: int, parameters, config): + # Save model + if server_round != 0 and ( + server_round == total_round or server_round % save_every_round == 0 + ): + # Init model + model = get_model(model_cfg) + set_parameters(model, parameters) + + model.save_pretrained(f"{save_path}/peft_{server_round}") + + return 0.0, {} + + return evaluate + + +def get_on_fit_config(save_path): + """Return a function that will be used to construct the config that the + client's fit() method will receive.""" + + def fit_config_fn(server_round: int): + fit_config = {} + fit_config["current_round"] = server_round + fit_config["save_path"] = save_path + return fit_config + + return fit_config_fn + + +def fit_weighted_average(metrics): + """Aggregate (federated) training metrics.""" + # Multiply train_loss of each client by number of examples used + losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"train_loss": sum(losses) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Create output directory given current timestamp + current_time = datetime.now() + folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") + save_path = os.path.join(os.getcwd(), f"results/{folder_name}") + os.makedirs(save_path, exist_ok=True) + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Get initial model weights + init_model = get_model(cfg.model) + init_model_parameters = get_parameters(init_model) + init_model_parameters = ndarrays_to_parameters(init_model_parameters) + + # Define strategy + strategy = FlowerTuneLlm( + fraction_fit=cfg.strategy.fraction_fit, + fraction_evaluate=cfg.strategy.fraction_evaluate, +
on_fit_config_fn=get_on_fit_config(save_path), + fit_metrics_aggregation_fn=fit_weighted_average, + initial_parameters=init_model_parameters, + evaluate_fn=get_evaluate_fn( + cfg.model, cfg.train.save_every_round, num_rounds, save_path + ), + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Flower ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl deleted file mode 100644 index a8a4039fc831..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl +++ /dev/null @@ -1,11 +0,0 @@ -# Federated Instruction Tuning (static) ---- -dataset: - name: $dataset_name - -# FL experimental settings -num_clients: $num_clients # total number of clients -num_rounds: 200 -partitioner: - _target_: flwr_datasets.partitioner.IidPartitioner - num_partitions: $num_clients diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl new file mode 100644 index 000000000000..8accd70c4e76 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl @@ -0,0 +1,83 @@ +"""$project_name: A Flower / FlowerTune app.""" + +from io import BytesIO +from logging import INFO, WARN +from typing import List, Tuple, Union + +from flwr.common import FitIns, FitRes, Parameters, log, parameters_to_ndarrays +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy import FedAvg + + +class FlowerTuneLlm(FedAvg): + """Customised FedAvg strategy implementation. + + This class behaves just like FedAvg but also tracks the communication + costs associated with `fit` over FL rounds. 
+ """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.comm_tracker = CommunicationTracker() + + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ): + """Configure the next round of training.""" + return_clients = super().configure_fit(server_round, parameters, client_manager) + + # Track communication costs + fit_ins_list = [fit_ins for _, fit_ins in return_clients] + self.comm_tracker.track(fit_ins_list) + + return return_clients + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ): + """Aggregate fit results using weighted average.""" + # Track communication costs + fit_res_list = [fit_res for _, fit_res in results] + self.comm_tracker.track(fit_res_list) + + parameters_aggregated, metrics_aggregated = super().aggregate_fit( + server_round, results, failures + ) + + return parameters_aggregated, metrics_aggregated + + +class CommunicationTracker: + """Communication costs tracker over FL rounds.""" + def __init__(self): + self.curr_comm_cost = 0.0 + + @staticmethod + def _compute_bytes(parameters): + return sum([BytesIO(t).getbuffer().nbytes for t in parameters.tensors]) + + def track(self, fit_list: List[Union[FitIns, FitRes]]): + size_bytes_list = [ + self._compute_bytes(fit_ele.parameters) + for fit_ele in fit_list + ] + comm_cost = sum(size_bytes_list) / 1024**2 + + self.curr_comm_cost += comm_cost + log( + INFO, + "Communication budget: used %.2f MB (+%.2f MB this round) / 200,000 MB", + self.curr_comm_cost, + comm_cost, + ) + + if self.curr_comm_cost > 2e5: + log( + WARN, + "The accumulated communication cost has exceeded 200,000 MB. " + "Please consider reducing it if you plan to participate " + "in the FlowerTune LLM Leaderboard.", + ) diff --git a/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl new file mode 100644 index 000000000000..8a914fcf60d1 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl @@ -0,0 +1,80 @@ +"""$project_name: A Flower Baseline.""" + +from collections import OrderedDict + +import torch +import torch.nn.functional as F +from torch import nn + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz').""" + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + """Do forward.""" + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, trainloader, epochs, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss() + criterion.to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + +def test(net, testloader, device): +
"""Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy + + +def get_weights(net): + """Extract model parameters as numpy arrays from state_dict.""" + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + """Apply parameters to an existing model.""" + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl new file mode 100644 index 000000000000..ea536e3efffb --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl @@ -0,0 +1,46 @@ +"""$project_name: A Flower Baseline.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from $import_name.model import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + """Do weighted average of accuracy metric.""" + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * float(m["accuracy"]) for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define strategy + strategy = FedAvg( + fraction_fit=float(fraction_fit), + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + evaluate_metrics_aggregation_fn=weighted_average, + ) + config = ServerConfig(num_rounds=int(num_rounds)) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl index 5491f6616160..16f94f0a64e9 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl @@ -1,18 +1,33 @@ """$project_name: A Flower / $framework_str app.""" -from flwr.common import Context -from flwr.server.strategy import FedAvg +from flwr.common import Context, ndarrays_to_parameters from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from transformers import AutoModelForSequenceClassification + +from $import_name.task import get_weights def server_fn(context: Context): # Read from config num_rounds = 
context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize global model + model_name = context.run_config["model-name"] + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + weights = get_weights(net) + initial_parameters = ndarrays_to_parameters(weights) # Define strategy strategy = FedAvg( - fraction_fit=1.0, + fraction_fit=fraction_fit, fraction_evaluate=1.0, + initial_parameters=initial_parameters, ) config = ServerConfig(num_rounds=num_rounds) diff --git a/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl index ad52e2c3fe21..1c50e85d7103 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl @@ -4,24 +4,25 @@ import warnings from collections import OrderedDict import torch +import transformers +from datasets.utils.logging import disable_progress_bar from evaluate import load as load_metric +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoTokenizer, DataCollatorWithPadding -from flwr_datasets import FederatedDataset -from flwr_datasets.partitioner import IidPartitioner - - warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cpu") -CHECKPOINT = "distilbert-base-uncased" # transformer model checkpoint +warnings.filterwarnings("ignore", category=FutureWarning) +disable_progress_bar() +transformers.logging.set_verbosity_error() fds = None # Cache FederatedDataset -def load_data(partition_id: int, num_partitions: int): +def load_data(partition_id: int, num_partitions: int, model_name: str): """Load IMDB data (training and eval)""" # Only initialize `FederatedDataset` once global fds @@ -35,10 +36,12 @@ def load_data(partition_id: int, num_partitions: int): # Divide data: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) + tokenizer = AutoTokenizer.from_pretrained(model_name) def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) + return tokenizer( + examples["text"], truncation=True, add_special_tokens=True, max_length=512 + ) partition_train_test = partition_train_test.map(tokenize_function, batched=True) partition_train_test = partition_train_test.remove_columns("text") @@ -59,12 +62,12 @@ def load_data(partition_id: int, num_partitions: int): return trainloader, testloader -def train(net, trainloader, epochs): +def train(net, trainloader, epochs, device): optimizer = AdamW(net.parameters(), lr=5e-5) net.train() for _ in range(epochs): for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} outputs = net(**batch) loss = outputs.loss loss.backward() @@ -72,12 +75,12 @@ def train(net, trainloader, epochs): optimizer.zero_grad() -def test(net, testloader): +def test(net, 
testloader, device): metric = load_metric("accuracy") loss = 0 net.eval() for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = net(**batch) logits = outputs.logits diff --git a/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl new file mode 100644 index 000000000000..71afc184ffa9 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl @@ -0,0 +1,138 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.11.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.metadata] +allow-direct-references = true + +[project.optional-dependencies] +dev = [ + "isort==5.13.2", + "black==24.2.0", + "docformatter==1.7.5", + "mypy==1.8.0", + "pylint==3.2.6", + "flake8==5.0.4", + "pytest==6.2.4", + "pytest-watch==4.2.0", + "ruff==0.1.9", + "types-requests==2.31.0.20240125", +] + +[tool.isort] +profile = "black" +known_first_party = ["flwr"] + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y,K,N" +max-args = 10 +max-attributes = 15 +max-locals = 36 +max-branches = 20 +max-statements = 55 + +[tool.pylint.typecheck] +generated-members = "numpy.*, torch.*, tensorflow.*" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + +[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "$username" + +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-fit = 0.5 +local-epochs = 1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 2 
+options.backend.client-resources.num-gpus = 0.0 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl index b564a66090d2..8a4d49e7fd84 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl @@ -8,15 +8,16 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.9.0,<2.0", - "flwr-datasets>=0.1.0,<1.0.0", - "hydra-core==1.3.2", + "flwr[simulation]>=1.11.1", + "flwr-datasets>=0.3.0", "trl==0.8.1", "bitsandbytes==0.43.0", "scipy==1.13.0", "peft==0.6.2", "transformers==4.39.3", "sentencepiece==0.2.0", + "omegaconf==2.3.0", + "hf_transfer==0.1.8", ] [tool.hatch.build.targets.wheel] @@ -26,14 +27,41 @@ packages = ["."] publisher = "$username" [tool.flwr.app.components] -serverapp = "$import_name.app:server" -clientapp = "$import_name.app:client" +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" [tool.flwr.app.config] -num-server-rounds = 3 +model.name = "mistralai/Mistral-7B-v0.3" +model.quantization = 4 +model.gradient-checkpointing = true +model.lora.peft-lora-r = 32 +model.lora.peft-lora-alpha = 64 +train.save-every-round = 5 +train.learning-rate-max = 5e-5 +train.learning-rate-min = 1e-6 +train.seq-length = 512 +train.training-arguments.output-dir = "" +train.training-arguments.learning-rate = "" +train.training-arguments.per-device-train-batch-size = 16 +train.training-arguments.gradient-accumulation-steps = 1 +train.training-arguments.logging-steps = 10 +train.training-arguments.num-train-epochs = 3 +train.training-arguments.max-steps = 10 +train.training-arguments.save-steps = 1000 +train.training-arguments.save-total-limit = 10 +train.training-arguments.gradient-checkpointing = true +train.training-arguments.lr-scheduler-type = "constant" +strategy.fraction-fit = $fraction_fit +strategy.fraction-evaluate = 0.0 +num-server-rounds = 200 + +[tool.flwr.app.config.static] +dataset.name = "$dataset_name" [tool.flwr.federations] default = "local-simulation" [tool.flwr.federations.local-simulation] -options.num-supernodes = 10 +options.num-supernodes = $num_clients +options.backend.client-resources.num-cpus = 6 +options.backend.client-resources.num-gpus = 1.0 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl index 15dc2af87a3f..af1e4d005114 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.11.0", "flwr-datasets>=0.3.0", "torch==2.2.1", "transformers>=4.30.0,<5.0", @@ -29,10 +29,18 @@ clientapp = "$import_name.client_app:app" [tool.flwr.app.config] num-server-rounds = 3 +fraction-fit = 0.5 local-epochs = 1 +model-name = "prajjwal1/bert-tiny" # Set a larger model if you have access to more GPU resources +num-labels = 2 [tool.flwr.federations] default = "localhost" [tool.flwr.federations.localhost] options.num-supernodes = 10 + +[tool.flwr.federations.localhost-gpu] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 4 # each ClientApp is assumed to use 4 CPUs +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApps will run on a given GPU diff --git
a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index b2c4dc4151cd..905055ac70c0 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -20,10 +20,9 @@ import sys from logging import DEBUG from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Annotated, Any, Optional import typer -from typing_extensions import Annotated from flwr.cli.build import build from flwr.cli.config_utils import load_and_validate @@ -52,7 +51,7 @@ def run( typer.Argument(help="Name of the federation to run the app on."), ] = None, config_overrides: Annotated[ - Optional[List[str]], + Optional[list[str]], typer.Option( "--run-config", "-c", @@ -124,14 +123,14 @@ def run( def _run_with_superexec( - app: Optional[Path], - federation_config: Dict[str, Any], - config_overrides: Optional[List[str]], + app: Path, + federation_config: dict[str, Any], + config_overrides: Optional[list[str]], ) -> None: insecure_str = federation_config.get("insecure") if root_certificates := federation_config.get("root-certificates"): - root_certificates_bytes = Path(root_certificates).read_bytes() + root_certificates_bytes = (app / root_certificates).read_bytes() if insecure := bool(insecure_str): typer.secho( "❌ `root_certificates` were provided but the `insecure` parameter" @@ -187,8 +186,8 @@ def _run_with_superexec( def _run_without_superexec( app: Optional[Path], - federation_config: Dict[str, Any], - config_overrides: Optional[List[str]], + federation_config: dict[str, Any], + config_overrides: Optional[list[str]], federation: str, ) -> None: try: diff --git a/src/py/flwr/cli/utils.py b/src/py/flwr/cli/utils.py index 2f5a8831fa7c..e725fdd3f951 100644 --- a/src/py/flwr/cli/utils.py +++ b/src/py/flwr/cli/utils.py @@ -17,7 +17,7 @@ import hashlib import re from pathlib import Path -from typing import Callable, List, Optional, cast +from typing import Callable, Optional, cast import typer @@ -40,7 +40,7 @@ def prompt_text( return cast(str, result) -def prompt_options(text: str, options: List[str]) -> str: +def prompt_options(text: str, options: list[str]) -> str: """Ask user to select one of the given options and return the selected item.""" # Turn options into a list with index as in " [ 0] quickstart-pytorch" options_formatted = [ diff --git a/src/py/flwr/client/__init__.py b/src/py/flwr/client/__init__.py index 218f2fe20d62..dce3be9036bb 100644 --- a/src/py/flwr/client/__init__.py +++ b/src/py/flwr/client/__init__.py @@ -20,8 +20,6 @@ from .client import Client as Client from .client_app import ClientApp as ClientApp from .numpy_client import NumPyClient as NumPyClient -from .supernode import run_client_app as run_client_app -from .supernode import run_supernode as run_supernode from .typing import ClientFn as ClientFn from .typing import ClientFnExt as ClientFnExt @@ -32,8 +30,6 @@ "ClientFnExt", "NumPyClient", "mod", - "run_client_app", - "run_supernode", "start_client", "start_numpy_client", ] diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index fb4855a09817..90c50aba7fad 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -18,10 +18,11 @@ import subprocess import sys import time +from contextlib import AbstractContextManager from dataclasses import dataclass from logging import ERROR, INFO, WARN from pathlib import Path -from typing import Callable, ContextManager, Dict, Optional, Tuple, Type, Union, cast +from typing import Callable, Optional, Union, cast import grpc from cryptography.hazmat.primitives.asymmetric import ec 
@@ -35,6 +36,7 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH, Context, EventType, Message, event from flwr.common.address import parse_address from flwr.common.constant import ( + CLIENTAPPIO_API_DEFAULT_ADDRESS, MISSING_EXTRA_REST, RUN_ID_NUM_BYTES, TRANSPORT_TYPE_GRPC_ADAPTER, @@ -60,8 +62,6 @@ from .node_state import NodeState from .numpy_client import NumPyClient -ADDRESS_CLIENTAPPIO_API_GRPC_RERE = "0.0.0.0:9094" - ISOLATION_MODE_SUBPROCESS = "subprocess" ISOLATION_MODE_PROCESS = "process" @@ -95,7 +95,7 @@ def start_client( insecure: Optional[bool] = None, transport: Optional[str] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, @@ -205,13 +205,13 @@ def start_client_internal( insecure: Optional[bool] = None, transport: Optional[str] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, flwr_path: Optional[Path] = None, isolation: Optional[str] = None, - supernode_address: Optional[str] = ADDRESS_CLIENTAPPIO_API_GRPC_RERE, + supernode_address: Optional[str] = CLIENTAPPIO_API_DEFAULT_ADDRESS, ) -> None: """Start a Flower client node which connects to a Flower server. @@ -266,7 +266,7 @@ class `flwr.client.Client` (default: None) by the SueprNode and communicates using gRPC at the address `supernode_address`. If `process`, the `ClientApp` runs in a separate isolated process and communicates using gRPC at the address `supernode_address`. - supernode_address : Optional[str] (default: `ADDRESS_CLIENTAPPIO_API_GRPC_RERE`) + supernode_address : Optional[str] (default: `CLIENTAPPIO_API_DEFAULT_ADDRESS`) The SuperNode gRPC server address. 
""" if insecure is None: @@ -357,7 +357,7 @@ def _on_backoff(retry_state: RetryState) -> None: # NodeState gets initialized when the first connection is established node_state: Optional[NodeState] = None - runs: Dict[int, Run] = {} + runs: dict[int, Run] = {} while not app_state_tracker.interrupt: sleep_duration: int = 0 @@ -690,7 +690,7 @@ def start_numpy_client( ) -def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ +def _init_connection(transport: Optional[str], server_address: str) -> tuple[ Callable[ [ str, @@ -698,10 +698,10 @@ def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ RetryInvoker, int, Union[bytes, str, None], - Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], + Optional[tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], ], - ContextManager[ - Tuple[ + AbstractContextManager[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], @@ -712,7 +712,7 @@ def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ ], ], str, - Type[Exception], + type[Exception], ]: # Parse IP address parsed_address = parse_address(server_address) @@ -770,7 +770,7 @@ def signal_handler(sig, frame): # type: ignore signal.signal(signal.SIGTERM, signal_handler) -def run_clientappio_api_grpc(address: str) -> Tuple[grpc.Server, ClientAppIoServicer]: +def run_clientappio_api_grpc(address: str) -> tuple[grpc.Server, ClientAppIoServicer]: """Run ClientAppIo API gRPC server.""" clientappio_servicer: grpc.Server = ClientAppIoServicer() clientappio_add_servicer_to_server_fn = add_ClientAppIoServicer_to_server diff --git a/src/py/flwr/client/app_test.py b/src/py/flwr/client/app_test.py index 74ade03f973a..723a066ea0bc 100644 --- a/src/py/flwr/client/app_test.py +++ b/src/py/flwr/client/app_test.py @@ -15,8 +15,6 @@ """Flower Client app tests.""" -from typing import Dict, Tuple - from flwr.common import ( Config, EvaluateIns, @@ -59,7 +57,7 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: class NeedsWrappingClient(NumPyClient): """Client implementation extending the high-level NumPyClient.""" - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() @@ -69,13 +67,13 @@ def get_parameters(self, config: Config) -> NDArrays: def fit( self, parameters: NDArrays, config: Config - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() def evaluate( self, parameters: NDArrays, config: Config - ) -> Tuple[float, int, Dict[str, Scalar]]: + ) -> tuple[float, int, dict[str, Scalar]]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index 2a913b3a248d..234d84f27782 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -16,7 +16,7 @@ import inspect -from typing import Callable, List, Optional +from typing import Callable, Optional from flwr.client.client import Client from flwr.client.message_handler.message_handler import ( @@ -41,11 +41,11 @@ def _alert_erroneous_client_fn() -> None: def _inspect_maybe_adapt_client_fn_signature(client_fn: ClientFnExt) -> ClientFnExt: 
client_fn_args = inspect.signature(client_fn).parameters - first_arg = list(client_fn_args.keys())[0] if len(client_fn_args) != 1: _alert_erroneous_client_fn() + first_arg = list(client_fn_args.keys())[0] first_arg_type = client_fn_args[first_arg].annotation if first_arg_type is str or first_arg == "cid": @@ -109,9 +109,9 @@ class ClientApp: def __init__( self, client_fn: Optional[ClientFnExt] = None, # Only for backward compatibility - mods: Optional[List[Mod]] = None, + mods: Optional[list[Mod]] = None, ) -> None: - self._mods: List[Mod] = mods if mods is not None else [] + self._mods: list[Mod] = mods if mods is not None else [] # Create wrapper function for `handle` self._call: Optional[ClientAppCallable] = None @@ -263,7 +263,7 @@ def _registration_error(fn_name: str) -> ValueError: >>> class FlowerClient(NumPyClient): >>> # ... >>> - >>> def client_fn(cid) -> Client: + >>> def client_fn(context: Context): >>> return FlowerClient().to_client() >>> >>> app = ClientApp( diff --git a/src/py/flwr/client/clientapp/app.py b/src/py/flwr/client/clientapp/app.py index 69d334fead14..f493128bebac 100644 --- a/src/py/flwr/client/clientapp/app.py +++ b/src/py/flwr/client/clientapp/app.py @@ -17,7 +17,7 @@ import argparse import time from logging import DEBUG, ERROR, INFO -from typing import Optional, Tuple +from typing import Optional import grpc @@ -196,7 +196,7 @@ def get_token(stub: grpc.Channel) -> Optional[int]: def pull_message( stub: grpc.Channel, token: int -) -> Tuple[Message, Context, Run, Optional[Fab]]: +) -> tuple[Message, Context, Run, Optional[Fab]]: """Pull message from SuperNode to ClientApp.""" log(INFO, "Pulling ClientAppInputs for token %s", token) try: diff --git a/src/py/flwr/client/dpfedavg_numpy_client.py b/src/py/flwr/client/dpfedavg_numpy_client.py index c592d10936d5..bade811b48ce 100644 --- a/src/py/flwr/client/dpfedavg_numpy_client.py +++ b/src/py/flwr/client/dpfedavg_numpy_client.py @@ -16,7 +16,6 @@ import copy -from typing import Dict, Tuple import numpy as np @@ -39,7 +38,7 @@ def __init__(self, client: NumPyClient) -> None: super().__init__() self.client = client - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Get client properties using the given Numpy client. Parameters @@ -58,7 +57,7 @@ def get_properties(self, config: Config) -> Dict[str, Scalar]: """ return self.client.get_properties(config) - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + def get_parameters(self, config: dict[str, Scalar]) -> NDArrays: """Return the current local model parameters. Parameters @@ -76,8 +75,8 @@ def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: return self.client.get_parameters(config) def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Train the provided parameters using the locally held dataset. This method first updates the local model using the original parameters @@ -153,8 +152,8 @@ def fit( return updated_params, num_examples, metrics def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Evaluate the provided parameters using the locally held dataset. 
Parameters diff --git a/src/py/flwr/client/grpc_adapter_client/connection.py b/src/py/flwr/client/grpc_adapter_client/connection.py index f9f7b1043524..9b84545eacdb 100644 --- a/src/py/flwr/client/grpc_adapter_client/connection.py +++ b/src/py/flwr/client/grpc_adapter_client/connection.py @@ -15,9 +15,10 @@ """Contextmanager for a GrpcAdapter channel to the Flower server.""" +from collections.abc import Iterator from contextlib import contextmanager from logging import ERROR -from typing import Callable, Iterator, Optional, Tuple, Union +from typing import Callable, Optional, Union from cryptography.hazmat.primitives.asymmetric import ec @@ -38,10 +39,10 @@ def grpc_adapter( # pylint: disable=R0913 max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 489891f55436..29479cf5479d 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -16,11 +16,12 @@ import uuid +from collections.abc import Iterator from contextlib import contextmanager from logging import DEBUG, ERROR from pathlib import Path from queue import Queue -from typing import Callable, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast from cryptography.hazmat.primitives.asymmetric import ec @@ -66,10 +67,10 @@ def grpc_connection( # pylint: disable=R0913, R0915 max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], diff --git a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index bd377ef3470a..13bd2c6af8e7 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -17,8 +17,9 @@ import concurrent.futures import socket +from collections.abc import Iterator from contextlib import closing -from typing import Iterator, cast +from typing import cast from unittest.mock import patch import grpc diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor.py b/src/py/flwr/client/grpc_rere_client/client_interceptor.py index d2dded8a73d9..653e384aff96 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor.py +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor.py @@ -17,11 +17,14 @@ import base64 import collections -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from collections.abc import Sequence +from logging import WARNING +from typing import Any, Callable, Optional, Union import grpc from cryptography.hazmat.primitives.asymmetric import ec +from flwr.common.logger import log from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( bytes_to_public_key, compute_hmac, @@ -51,7 
+54,7 @@ def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, value in tuples if key == key_string), "") if isinstance(value, str): @@ -128,13 +131,12 @@ def intercept_unary_unary( if self.shared_secret is None: raise RuntimeError("Failure to compute hmac") + message_bytes = request.SerializeToString(deterministic=True) metadata.append( ( _AUTH_TOKEN_HEADER, base64.urlsafe_b64encode( - compute_hmac( - self.shared_secret, request.SerializeToString(True) - ) + compute_hmac(self.shared_secret, message_bytes) ), ) ) @@ -151,8 +153,15 @@ def intercept_unary_unary( server_public_key_bytes = base64.urlsafe_b64decode( _get_value_from_tuples(_PUBLIC_KEY_HEADER, response.initial_metadata()) ) - self.server_public_key = bytes_to_public_key(server_public_key_bytes) - self.shared_secret = generate_shared_key( - self.private_key, self.server_public_key - ) + + if server_public_key_bytes != b"": + self.server_public_key = bytes_to_public_key(server_public_key_bytes) + else: + log(WARNING, "Can't get server public key, SuperLink may be offline") + + if self.server_public_key is not None: + self.shared_secret = generate_shared_key( + self.private_key, self.server_public_key + ) + return response diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py index 79416a8eb31b..27f759a71713 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py @@ -18,9 +18,10 @@ import base64 import threading import unittest +from collections.abc import Sequence from concurrent import futures from logging import DEBUG, INFO, WARN -from typing import Optional, Sequence, Tuple, Union +from typing import Optional, Union import grpc @@ -60,7 +61,7 @@ def __init__(self) -> None: """Initialize mock servicer.""" self._lock = threading.Lock() self._received_client_metadata: Optional[ - Sequence[Tuple[str, Union[str, bytes]]] + Sequence[tuple[str, Union[str, bytes]]] ] = None self.server_private_key, self.server_public_key = generate_key_pairs() self._received_message_bytes: bytes = b"" @@ -73,7 +74,7 @@ def unary_unary( """Handle unary call.""" with self._lock: self._received_client_metadata = context.invocation_metadata() - self._received_message_bytes = request.SerializeToString(True) + self._received_message_bytes = request.SerializeToString(deterministic=True) if isinstance(request, CreateNodeRequest): context.send_initial_metadata( @@ -105,7 +106,7 @@ def unary_unary( def received_client_metadata( self, - ) -> Optional[Sequence[Tuple[str, Union[str, bytes]]]]: + ) -> Optional[Sequence[tuple[str, Union[str, bytes]]]]: """Return received client metadata.""" with self._lock: return self._received_client_metadata @@ -151,7 +152,7 @@ def _add_generic_handler(servicer: _MockServicer, server: grpc.Server) -> None: def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, value in tuples if key == key_string), "") if isinstance(value, str): @@ -164,7 +165,7 @@ def _init_retry_invoker() -> RetryInvoker: return RetryInvoker( wait_gen_factory=exponential, recoverable_exceptions=grpc.RpcError, - max_tries=None, + max_tries=1, max_time=None, on_giveup=lambda retry_state: ( log( @@ 
-415,6 +416,27 @@ def test_client_auth_get_run(self) -> None: assert actual_public_key == expected_public_key assert actual_hmac == expected_hmac + def test_without_servicer(self) -> None: + """Test client authentication without servicer.""" + # Prepare + self._server.stop(grace=None) + retry_invoker = _init_retry_invoker() + + # Execute and Assert + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, create_node, _, _, _ = conn + assert create_node is not None + create_node() + + assert self._servicer.received_client_metadata() is None + if __name__ == "__main__": unittest.main(verbosity=2) diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index 8bae253c819a..7ce3d37b7a17 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -17,11 +17,12 @@ import random import threading +from collections.abc import Iterator, Sequence from contextlib import contextmanager from copy import copy from logging import DEBUG, ERROR from pathlib import Path -from typing import Callable, Iterator, Optional, Sequence, Tuple, Type, Union, cast +from typing import Callable, Optional, Union, cast import grpc from cryptography.hazmat.primitives.asymmetric import ec @@ -77,11 +78,11 @@ def grpc_request_response( # pylint: disable=R0913, R0914, R0915 max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, - adapter_cls: Optional[Union[Type[FleetStub], Type[GrpcAdapter]]] = None, + adapter_cls: Optional[Union[type[FleetStub], type[GrpcAdapter]]] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], diff --git a/src/py/flwr/client/grpc_rere_client/grpc_adapter.py b/src/py/flwr/client/grpc_rere_client/grpc_adapter.py index fde03943a852..3dce14c14956 100644 --- a/src/py/flwr/client/grpc_rere_client/grpc_adapter.py +++ b/src/py/flwr/client/grpc_rere_client/grpc_adapter.py @@ -17,7 +17,7 @@ import sys from logging import DEBUG -from typing import Any, Type, TypeVar, cast +from typing import Any, TypeVar, cast import grpc from google.protobuf.message import Message as GrpcMessage @@ -59,7 +59,7 @@ def __init__(self, channel: grpc.Channel) -> None: self.stub = GrpcAdapterStub(channel) def _send_and_receive( - self, request: GrpcMessage, response_type: Type[T], **kwargs: Any + self, request: GrpcMessage, response_type: type[T], **kwargs: Any ) -> T: # Serialize request container_req = MessageContainer( diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 1ab84eb01468..765c6a6b2e91 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -15,7 +15,7 @@ """Client-side message handler.""" from logging import WARN -from typing import Optional, Tuple, cast +from typing import Optional, cast from flwr.client.client import ( maybe_call_evaluate, @@ -52,7 +52,7 @@ class UnknownServerMessage(Exception): """Exception indicating that the received message is unknown.""" -def handle_control_message(message: Message) -> 
Tuple[Optional[Message], int]: +def handle_control_message(message: Message) -> tuple[Optional[Message], int]: """Handle control part of the incoming message. Parameters @@ -147,7 +147,7 @@ def handle_legacy_message_from_msgtype( def _reconnect( reconnect_msg: ServerMessage.ReconnectIns, -) -> Tuple[ClientMessage, int]: +) -> tuple[ClientMessage, int]: # Determine the reason for sending DisconnectRes message reason = Reason.ACK sleep_duration = None diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index 557d61ffb32a..311f8c37e1b1 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -19,7 +19,6 @@ import unittest import uuid from copy import copy -from typing import List from flwr.client import Client from flwr.client.typing import ClientFnExt @@ -294,7 +293,7 @@ def test_invalid_message_run_id(self) -> None: msg = Message(metadata=self.valid_out_metadata, content=RecordSet()) # Execute - invalid_metadata_list: List[Metadata] = [] + invalid_metadata_list: list[Metadata] = [] attrs = list(vars(self.valid_out_metadata).keys()) for attr in attrs: if attr == "_partition_id": diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 5b196ad84321..f9d3c433157d 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -18,7 +18,7 @@ import os from dataclasses import dataclass, field from logging import DEBUG, WARNING -from typing import Any, Dict, List, Tuple, cast +from typing import Any, cast from flwr.client.typing import ClientAppCallable from flwr.common import ( @@ -91,11 +91,11 @@ class SecAggPlusState: # Random seed for generating the private mask rd_seed: bytes = b"" - rd_seed_share_dict: Dict[int, bytes] = field(default_factory=dict) - sk1_share_dict: Dict[int, bytes] = field(default_factory=dict) + rd_seed_share_dict: dict[int, bytes] = field(default_factory=dict) + sk1_share_dict: dict[int, bytes] = field(default_factory=dict) # The dict of the shared secrets from sk2 - ss2_dict: Dict[int, bytes] = field(default_factory=dict) - public_keys_dict: Dict[int, Tuple[bytes, bytes]] = field(default_factory=dict) + ss2_dict: dict[int, bytes] = field(default_factory=dict) + public_keys_dict: dict[int, tuple[bytes, bytes]] = field(default_factory=dict) def __init__(self, **kwargs: ConfigsRecordValues) -> None: for k, v in kwargs.items(): @@ -104,8 +104,8 @@ def __init__(self, **kwargs: ConfigsRecordValues) -> None: new_v: Any = v if k.endswith(":K"): k = k[:-2] - keys = cast(List[int], v) - values = cast(List[bytes], kwargs[f"{k}:V"]) + keys = cast(list[int], v) + values = cast(list[bytes], kwargs[f"{k}:V"]) if len(values) > len(keys): updated_values = [ tuple(values[i : i + 2]) for i in range(0, len(values), 2) @@ -115,17 +115,17 @@ def __init__(self, **kwargs: ConfigsRecordValues) -> None: new_v = dict(zip(keys, values)) self.__setattr__(k, new_v) - def to_dict(self) -> Dict[str, ConfigsRecordValues]: + def to_dict(self) -> dict[str, ConfigsRecordValues]: """Convert the state to a dictionary.""" ret = vars(self) for k in list(ret.keys()): if isinstance(ret[k], dict): # Replace dict with two lists - v = cast(Dict[str, Any], ret.pop(k)) + v = cast(dict[str, Any], ret.pop(k)) ret[f"{k}:K"] = list(v.keys()) if k == "public_keys_dict": - v_list: List[bytes] = [] - 
for b1_b2 in cast(List[Tuple[bytes, bytes]], v.values()): + v_list: list[bytes] = [] + for b1_b2 in cast(list[tuple[bytes, bytes]], v.values()): v_list.extend(b1_b2) ret[f"{k}:V"] = v_list else: @@ -276,7 +276,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: ) if not isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], configs[key]) + for elm in cast(list[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -299,7 +299,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: ) if not isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], configs[key]) + for elm in cast(list[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -314,7 +314,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: def _setup( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: # Assigning parameter values to object fields sec_agg_param_dict = configs state.sample_num = cast(int, sec_agg_param_dict[Key.SAMPLE_NUMBER]) @@ -350,8 +350,8 @@ def _setup( # pylint: disable-next=too-many-locals def _share_keys( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: - named_bytes_tuples = cast(Dict[str, Tuple[bytes, bytes]], configs) +) -> dict[str, ConfigsRecordValues]: + named_bytes_tuples = cast(dict[str, tuple[bytes, bytes]], configs) key_dict = {int(sid): (pk1, pk2) for sid, (pk1, pk2) in named_bytes_tuples.items()} log(DEBUG, "Node %d: starting stage 1...", state.nid) state.public_keys_dict = key_dict @@ -361,7 +361,7 @@ def _share_keys( raise ValueError("Available neighbours number smaller than threshold") # Check if all public keys are unique - pk_list: List[bytes] = [] + pk_list: list[bytes] = [] for pk1, pk2 in state.public_keys_dict.values(): pk_list.append(pk1) pk_list.append(pk2) @@ -415,11 +415,11 @@ def _collect_masked_vectors( configs: ConfigsRecord, num_examples: int, updated_parameters: Parameters, -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: log(DEBUG, "Node %d: starting stage 2...", state.nid) - available_clients: List[int] = [] - ciphertexts = cast(List[bytes], configs[Key.CIPHERTEXT_LIST]) - srcs = cast(List[int], configs[Key.SOURCE_LIST]) + available_clients: list[int] = [] + ciphertexts = cast(list[bytes], configs[Key.CIPHERTEXT_LIST]) + srcs = cast(list[int], configs[Key.SOURCE_LIST]) if len(ciphertexts) + 1 < state.threshold: raise ValueError("Not enough available neighbour clients.") @@ -467,7 +467,7 @@ def _collect_masked_vectors( quantized_parameters = factor_combine(q_ratio, quantized_parameters) - dimensions_list: List[Tuple[int, ...]] = [a.shape for a in quantized_parameters] + dimensions_list: list[tuple[int, ...]] = [a.shape for a in quantized_parameters] # Add private mask private_mask = pseudo_rand_gen(state.rd_seed, state.mod_range, dimensions_list) @@ -499,11 +499,11 @@ def _collect_masked_vectors( def _unmask( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: log(DEBUG, "Node %d: starting stage 3...", state.nid) - active_nids = cast(List[int], configs[Key.ACTIVE_NODE_ID_LIST]) - dead_nids = cast(List[int], configs[Key.DEAD_NODE_ID_LIST]) + active_nids = cast(list[int], configs[Key.ACTIVE_NODE_ID_LIST]) + dead_nids = cast(list[int], configs[Key.DEAD_NODE_ID_LIST]) # Send private mask seed share for 
every avaliable client (including itself) # Send first private key share for building pairwise mask for every dropped client if len(active_nids) < state.threshold: diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index 2832576fb4fc..e68bf5177797 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -16,7 +16,7 @@ import unittest from itertools import product -from typing import Callable, Dict, List +from typing import Callable from flwr.client.mod import make_ffn from flwr.common import ( @@ -41,7 +41,7 @@ def get_test_handler( ctxt: Context, -) -> Callable[[Dict[str, ConfigsRecordValues]], ConfigsRecord]: +) -> Callable[[dict[str, ConfigsRecordValues]], ConfigsRecord]: """.""" def empty_ffn(_msg: Message, _2: Context) -> Message: @@ -49,7 +49,7 @@ def empty_ffn(_msg: Message, _2: Context) -> Message: app = make_ffn(empty_ffn, [secaggplus_mod]) - def func(configs: Dict[str, ConfigsRecordValues]) -> ConfigsRecord: + def func(configs: dict[str, ConfigsRecordValues]) -> ConfigsRecord: in_msg = Message( metadata=Metadata( run_id=0, @@ -158,7 +158,7 @@ def test_stage_setup_check(self) -> None: (Key.MOD_RANGE, int), ] - type_to_test_value: Dict[type, ConfigsRecordValues] = { + type_to_test_value: dict[type, ConfigsRecordValues] = { int: 10, bool: True, float: 1.0, @@ -166,7 +166,7 @@ def test_stage_setup_check(self) -> None: bytes: b"test", } - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { key: type_to_test_value[value_type] for key, value_type in valid_key_type_pairs } @@ -208,7 +208,7 @@ def test_stage_share_keys_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { "1": [b"public key 1", b"public key 2"], "2": [b"public key 1", b"public key 2"], "3": [b"public key 1", b"public key 2"], @@ -225,7 +225,7 @@ def test_stage_share_keys_check(self) -> None: valid_configs[Key.STAGE] = Stage.SHARE_KEYS # Test invalid configs - invalid_values: List[ConfigsRecordValues] = [ + invalid_values: list[ConfigsRecordValues] = [ b"public key 1", [b"public key 1"], [b"public key 1", b"public key 2", b"public key 3"], @@ -245,7 +245,7 @@ def test_stage_collect_masked_vectors_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { Key.CIPHERTEXT_LIST: [b"ctxt!", b"ctxt@", b"ctxt#", b"ctxt?"], Key.SOURCE_LIST: [32, 51324, 32324123, -3], } @@ -289,7 +289,7 @@ def test_stage_unmask_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { Key.ACTIVE_NODE_ID_LIST: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], Key.DEAD_NODE_ID_LIST: [32, 51324, 32324123, -3], } diff --git a/src/py/flwr/client/mod/utils.py b/src/py/flwr/client/mod/utils.py index c8fb21379783..c76902cf263f 100644 --- a/src/py/flwr/client/mod/utils.py +++ b/src/py/flwr/client/mod/utils.py @@ -15,13 +15,11 @@ """Utility functions for mods.""" -from typing import List - from flwr.client.typing import ClientAppCallable, Mod from flwr.common import Context, Message -def make_ffn(ffn: ClientAppCallable, mods: 
List[Mod]) -> ClientAppCallable: +def make_ffn(ffn: ClientAppCallable, mods: list[Mod]) -> ClientAppCallable: """.""" def wrap_ffn(_ffn: ClientAppCallable, _mod: Mod) -> ClientAppCallable: diff --git a/src/py/flwr/client/mod/utils_test.py b/src/py/flwr/client/mod/utils_test.py index a5bbd0a0bb4d..e75fb5530b2c 100644 --- a/src/py/flwr/client/mod/utils_test.py +++ b/src/py/flwr/client/mod/utils_test.py @@ -16,7 +16,7 @@ import unittest -from typing import List, cast +from typing import cast from flwr.client.typing import ClientAppCallable, Mod from flwr.common import ( @@ -43,7 +43,7 @@ def _increment_context_counter(context: Context) -> None: context.state.metrics_records[METRIC] = MetricsRecord({COUNTER: current_counter}) -def make_mock_mod(name: str, footprint: List[str]) -> Mod: +def make_mock_mod(name: str, footprint: list[str]) -> Mod: """Make a mock mod.""" def mod(message: Message, context: Context, app: ClientAppCallable) -> Message: @@ -61,7 +61,7 @@ def mod(message: Message, context: Context, app: ClientAppCallable) -> Message: return mod -def make_mock_app(name: str, footprint: List[str]) -> ClientAppCallable: +def make_mock_app(name: str, footprint: list[str]) -> ClientAppCallable: """Make a mock app.""" def app(message: Message, context: Context) -> Message: @@ -97,7 +97,7 @@ class TestMakeApp(unittest.TestCase): def test_multiple_mods(self) -> None: """Test if multiple mods are called in the correct order.""" # Prepare - footprint: List[str] = [] + footprint: list[str] = [] mock_app = make_mock_app("app", footprint) mock_mod_names = [f"mod{i}" for i in range(1, 15)] mock_mods = [make_mock_mod(name, footprint) for name in mock_mod_names] @@ -127,7 +127,7 @@ def test_multiple_mods(self) -> None: def test_filter(self) -> None: """Test if a mod can filter incoming TaskIns.""" # Prepare - footprint: List[str] = [] + footprint: list[str] = [] mock_app = make_mock_app("app", footprint) context = Context(node_id=0, node_config={}, state=RecordSet(), run_config={}) message = _get_dummy_flower_message() diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/node_state.py index e16d7e34715d..e7967dfc8bee 100644 --- a/src/py/flwr/client/node_state.py +++ b/src/py/flwr/client/node_state.py @@ -17,7 +17,7 @@ from dataclasses import dataclass from pathlib import Path -from typing import Dict, Optional +from typing import Optional from flwr.common import Context, RecordSet from flwr.common.config import ( @@ -46,7 +46,7 @@ def __init__( ) -> None: self.node_id = node_id self.node_config = node_config - self.run_infos: Dict[int, RunInfo] = {} + self.run_infos: dict[int, RunInfo] = {} # pylint: disable=too-many-arguments def register_context( diff --git a/src/py/flwr/client/numpy_client.py b/src/py/flwr/client/numpy_client.py index b21a51b38e9b..6a656cb661d2 100644 --- a/src/py/flwr/client/numpy_client.py +++ b/src/py/flwr/client/numpy_client.py @@ -16,7 +16,7 @@ from abc import ABC -from typing import Callable, Dict, Tuple +from typing import Callable from flwr.client.client import Client from flwr.common import ( @@ -73,7 +73,7 @@ class NumPyClient(ABC): _context: Context - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return a client's set of properties. 
Parameters @@ -93,7 +93,7 @@ def get_properties(self, config: Config) -> Dict[str, Scalar]: _ = (self, config) return {} - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + def get_parameters(self, config: dict[str, Scalar]) -> NDArrays: """Return the current local model parameters. Parameters @@ -112,8 +112,8 @@ def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: return [] def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Train the provided parameters using the locally held dataset. Parameters @@ -141,8 +141,8 @@ def fit( return [], 0, {} def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Evaluate the provided parameters using the locally held dataset. Parameters @@ -310,7 +310,7 @@ def _set_context(self: Client, context: Context) -> None: def _wrap_numpy_client(client: NumPyClient) -> Client: - member_dict: Dict[str, Callable] = { # type: ignore + member_dict: dict[str, Callable] = { # type: ignore "__init__": _constructor, "get_context": _get_context, "set_context": _set_context, diff --git a/src/py/flwr/client/numpy_client_test.py b/src/py/flwr/client/numpy_client_test.py index 06a0deafe2c9..c5d520a73ce1 100644 --- a/src/py/flwr/client/numpy_client_test.py +++ b/src/py/flwr/client/numpy_client_test.py @@ -15,8 +15,6 @@ """Flower NumPyClient tests.""" -from typing import Dict, Tuple - from flwr.common import Config, NDArrays, Properties, Scalar from .numpy_client import ( @@ -40,14 +38,14 @@ def get_parameters(self, config: Config) -> NDArrays: return [] def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Simulate training by returning empty weights, 0 samples, empty metrics.""" return [], 0, {} def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Simulate evaluate by returning 0.0 loss, 0 samples, empty metrics.""" return 0.0, 0, {} diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index d5f005fbaf77..72b6be25a708 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -18,10 +18,11 @@ import random import sys import threading +from collections.abc import Iterator from contextlib import contextmanager from copy import copy from logging import ERROR, INFO, WARN -from typing import Callable, Iterator, Optional, Tuple, Type, TypeVar, Union +from typing import Callable, Optional, TypeVar, Union from cryptography.hazmat.primitives.asymmetric import ec from google.protobuf.message import Message as GrpcMessage @@ -90,10 +91,10 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 Union[bytes, str] ] = None, # pylint: disable=unused-argument authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ 
Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], @@ -173,7 +174,7 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 ########################################################################### def _request( - req: GrpcMessage, res_type: Type[T], api_path: str, retry: bool = True + req: GrpcMessage, res_type: type[T], api_path: str, retry: bool = True ) -> Optional[T]: # Serialize the request req_bytes = req.SerializeToString() diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py index 8d28e69dea6e..d9af001bba53 100644 --- a/src/py/flwr/client/supernode/app.py +++ b/src/py/flwr/client/supernode/app.py @@ -18,7 +18,7 @@ import sys from logging import DEBUG, ERROR, INFO, WARN from pathlib import Path -from typing import Optional, Tuple +from typing import Optional from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.primitives.asymmetric import ec @@ -30,6 +30,7 @@ from flwr.common import EventType, event from flwr.common.config import parse_config_args from flwr.common.constant import ( + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, @@ -44,8 +45,6 @@ ) from ..clientapp.utils import get_load_client_app_fn -ADDRESS_FLEET_API_GRPC_RERE = "0.0.0.0:9092" - def run_supernode() -> None: """Run Flower SuperNode.""" @@ -77,7 +76,9 @@ def run_supernode() -> None: authentication_keys=authentication_keys, max_retries=args.max_retries, max_wait_time=args.max_wait_time, - node_config=parse_config_args([args.node_config]), + node_config=parse_config_args( + [args.node_config] if args.node_config else args.node_config + ), isolation=args.isolation, supernode_address=args.supernode_address, ) @@ -101,11 +102,11 @@ def run_client_app() -> None: def _warn_deprecated_server_arg(args: argparse.Namespace) -> None: """Warn about the deprecated argument `--server`.""" - if args.server != ADDRESS_FLEET_API_GRPC_RERE: + if args.server != FLEET_API_GRPC_RERE_DEFAULT_ADDRESS: warn = "Passing flag --server is deprecated. Use --superlink instead." 
warn_deprecated_feature(warn) - if args.superlink != ADDRESS_FLEET_API_GRPC_RERE: + if args.superlink != FLEET_API_GRPC_RERE_DEFAULT_ADDRESS: # if `--superlink` also passed, then # warn user that this argument overrides what was passed with `--server` log( @@ -245,12 +246,12 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: ) parser.add_argument( "--server", - default=ADDRESS_FLEET_API_GRPC_RERE, + default=FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, help="Server address", ) parser.add_argument( "--superlink", - default=ADDRESS_FLEET_API_GRPC_RERE, + default=FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, help="SuperLink Fleet API (gRPC-rere) address (IPv4, IPv6, or a domain name)", ) parser.add_argument( @@ -290,7 +291,7 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: def _try_setup_client_authentication( args: argparse.Namespace, -) -> Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: +) -> Optional[tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: if not args.auth_supernode_private_key and not args.auth_supernode_public_key: return None diff --git a/src/py/flwr/common/address.py b/src/py/flwr/common/address.py index 7a70925c0fc9..2b10097ccb71 100644 --- a/src/py/flwr/common/address.py +++ b/src/py/flwr/common/address.py @@ -16,12 +16,12 @@ import socket from ipaddress import ip_address -from typing import Optional, Tuple +from typing import Optional IPV6: int = 6 -def parse_address(address: str) -> Optional[Tuple[str, int, Optional[bool]]]: +def parse_address(address: str) -> Optional[tuple[str, int, Optional[bool]]]: """Parse an IP address into host, port, and version. Parameters diff --git a/src/py/flwr/common/config.py b/src/py/flwr/common/config.py index eec7cfb726b7..071d41a3ab5e 100644 --- a/src/py/flwr/common/config.py +++ b/src/py/flwr/common/config.py @@ -17,7 +17,7 @@ import os import re from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union, cast, get_args +from typing import Any, Optional, Union, cast, get_args import tomli @@ -53,7 +53,7 @@ def get_project_dir( return Path(flwr_dir) / APP_DIR / publisher / project_name / fab_version -def get_project_config(project_dir: Union[str, Path]) -> Dict[str, Any]: +def get_project_config(project_dir: Union[str, Path]) -> dict[str, Any]: """Return pyproject.toml in the given project directory.""" # Load pyproject.toml file toml_path = Path(project_dir) / FAB_CONFIG_FILE @@ -137,13 +137,13 @@ def get_fused_config(run: Run, flwr_dir: Optional[Path]) -> UserConfig: def flatten_dict( - raw_dict: Optional[Dict[str, Any]], parent_key: str = "" + raw_dict: Optional[dict[str, Any]], parent_key: str = "" ) -> UserConfig: """Flatten dict by joining nested keys with a given separator.""" if raw_dict is None: return {} - items: List[Tuple[str, UserConfigValue]] = [] + items: list[tuple[str, UserConfigValue]] = [] separator: str = "." for k, v in raw_dict.items(): new_key = f"{parent_key}{separator}{k}" if parent_key else k @@ -159,9 +159,9 @@ def flatten_dict( return dict(items) -def unflatten_dict(flat_dict: Dict[str, Any]) -> Dict[str, Any]: +def unflatten_dict(flat_dict: dict[str, Any]) -> dict[str, Any]: """Unflatten a dict with keys containing separators into a nested dict.""" - unflattened_dict: Dict[str, Any] = {} + unflattened_dict: dict[str, Any] = {} separator: str = "." 
for key, value in flat_dict.items(): @@ -177,7 +177,7 @@ def unflatten_dict(flat_dict: Dict[str, Any]) -> Dict[str, Any]: def parse_config_args( - config: Optional[List[str]], + config: Optional[list[str]], ) -> UserConfig: """Parse separator separated list of key-value pairs separated by '='.""" overrides: UserConfig = {} @@ -185,28 +185,31 @@ def parse_config_args( if config is None: return overrides + # Handle if .toml file is passed + if len(config) == 1 and config[0].endswith(".toml"): + with Path(config[0]).open("rb") as config_file: + overrides = flatten_dict(tomli.load(config_file)) + return overrides + # Regular expression to capture key-value pairs with possible quoted values pattern = re.compile(r"(\S+?)=(\'[^\']*\'|\"[^\"]*\"|\S+)") for config_line in config: if config_line: - matches = pattern.findall(config_line) + # .toml files aren't allowed alongside other configs + if config_line.endswith(".toml"): + raise ValueError( + "TOML files cannot be passed alongside key-value pairs." + ) - if ( - len(matches) == 1 - and "=" not in matches[0][0] - and matches[0][0].endswith(".toml") - ): - with Path(matches[0][0]).open("rb") as config_file: - overrides = flatten_dict(tomli.load(config_file)) - else: - toml_str = "\n".join(f"{k} = {v}" for k, v in matches) - overrides.update(tomli.loads(toml_str)) + matches = pattern.findall(config_line) + toml_str = "\n".join(f"{k} = {v}" for k, v in matches) + overrides.update(tomli.loads(toml_str)) return overrides -def get_metadata_from_config(config: Dict[str, Any]) -> Tuple[str, str]: +def get_metadata_from_config(config: dict[str, Any]) -> tuple[str, str]: """Extract `fab_version` and `fab_id` from a project config.""" return ( config["project"]["version"], diff --git a/src/py/flwr/common/config_test.py b/src/py/flwr/common/config_test.py index 712e07264d3f..34bc691cc957 100644 --- a/src/py/flwr/common/config_test.py +++ b/src/py/flwr/common/config_test.py @@ -15,6 +15,7 @@ """Test util functions handling Flower config.""" import os +import tempfile import textwrap from pathlib import Path from unittest.mock import patch @@ -254,3 +255,50 @@ def test_parse_config_args_overrides() -> None: "key5": True, "key6": "value6", } + + +def test_parse_config_args_from_toml_file() -> None: + """Test if a toml passed to --run-config it is loaded and fused correctly.""" + # Will be saved as a temp .toml file + toml_config = """ + num-server-rounds = 10 + momentum = 0.1 + verbose = true + """ + # This is the UserConfig that would be extracted from pyproject.toml + initial_run_config: UserConfig = { + "num-server-rounds": 5, + "momentum": 0.2, + "dataset": "my-fancy-dataset", + "verbose": False, + } + expected_config = { + "num-server-rounds": 10, + "momentum": 0.1, + "dataset": "my-fancy-dataset", + "verbose": True, + } + + # Create a temporary directory using a context manager + with tempfile.TemporaryDirectory() as temp_dir: + # Create a temporary TOML file within that directory + toml_config_file = os.path.join(temp_dir, "extra_config.toml") + + # Write the data to the TOML file + with open(toml_config_file, "w", encoding="utf-8") as toml_file: + toml_file.write(textwrap.dedent(toml_config)) + + # Parse config (this mimics what `--run-config path/to/config.toml` does) + config_from_toml = parse_config_args([toml_config_file]) + # Fuse + config = fuse_dicts(initial_run_config, config_from_toml) + + # Assert + assert config == expected_config + + +def test_parse_config_args_passing_toml_and_key_value() -> None: + """Test that passing a toml and key-value 
configs aren't allowed.""" + config = ["my-other-config.toml", "lr=0.1", "epochs=99"] + with pytest.raises(ValueError): + parse_config_args(config) diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 72256a62add7..eabe324f41c5 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -37,7 +37,18 @@ TRANSPORT_TYPE_VCE, ] -SUPEREXEC_DEFAULT_ADDRESS = "0.0.0.0:9093" +# Addresses +# SuperNode +CLIENTAPPIO_API_DEFAULT_ADDRESS = "0.0.0.0:9094" +# SuperExec +EXEC_API_DEFAULT_ADDRESS = "0.0.0.0:9093" +# SuperLink +DRIVER_API_DEFAULT_ADDRESS = "0.0.0.0:9091" +FLEET_API_GRPC_RERE_DEFAULT_ADDRESS = "0.0.0.0:9092" +FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS = ( + "[::]:8080" # IPv6 to keep start_server compatible +) +FLEET_API_REST_DEFAULT_ADDRESS = "0.0.0.0:9093" # Constants for ping PING_DEFAULT_INTERVAL = 30 diff --git a/src/py/flwr/common/differential_privacy.py b/src/py/flwr/common/differential_privacy.py index 85dc198ef8a0..56da98a3c805 100644 --- a/src/py/flwr/common/differential_privacy.py +++ b/src/py/flwr/common/differential_privacy.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Optional, Tuple +from typing import Optional import numpy as np @@ -125,7 +125,7 @@ def compute_adaptive_noise_params( noise_multiplier: float, num_sampled_clients: float, clipped_count_stddev: Optional[float], -) -> Tuple[float, float]: +) -> tuple[float, float]: """Compute noising parameters for the adaptive clipping. Paper: https://arxiv.org/abs/1905.03871 diff --git a/src/py/flwr/common/dp.py b/src/py/flwr/common/dp.py index 527805c8ef42..13ae94461ef9 100644 --- a/src/py/flwr/common/dp.py +++ b/src/py/flwr/common/dp.py @@ -15,8 +15,6 @@ """Building block functions for DP algorithms.""" -from typing import Tuple - import numpy as np from flwr.common.logger import warn_deprecated_feature @@ -41,7 +39,7 @@ def add_gaussian_noise(update: NDArrays, std_dev: float) -> NDArrays: return update_noised -def clip_by_l2(update: NDArrays, threshold: float) -> Tuple[NDArrays, bool]: +def clip_by_l2(update: NDArrays, threshold: float) -> tuple[NDArrays, bool]: """Scales the update so thats its L2 norm is upper-bound to threshold.""" warn_deprecated_feature("`clip_by_l2` method") update_norm = _get_update_norm(update) diff --git a/src/py/flwr/common/exit_handlers.py b/src/py/flwr/common/exit_handlers.py index 30750c28a450..e5898b46a537 100644 --- a/src/py/flwr/common/exit_handlers.py +++ b/src/py/flwr/common/exit_handlers.py @@ -19,7 +19,7 @@ from signal import SIGINT, SIGTERM, signal from threading import Thread from types import FrameType -from typing import List, Optional +from typing import Optional from grpc import Server @@ -28,8 +28,8 @@ def register_exit_handlers( event_type: EventType, - grpc_servers: Optional[List[Server]] = None, - bckg_threads: Optional[List[Thread]] = None, + grpc_servers: Optional[list[Server]] = None, + bckg_threads: Optional[list[Thread]] = None, ) -> None: """Register exit handlers for `SIGINT` and `SIGTERM` signals. 
diff --git a/src/py/flwr/common/grpc.py b/src/py/flwr/common/grpc.py index ec8fe823a7eb..5a29c595119c 100644 --- a/src/py/flwr/common/grpc.py +++ b/src/py/flwr/common/grpc.py @@ -15,8 +15,9 @@ """Utility functions for gRPC.""" +from collections.abc import Sequence from logging import DEBUG -from typing import Optional, Sequence +from typing import Optional import grpc diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 2077f9beaca0..303780fc0b5d 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -18,7 +18,7 @@ import logging from logging import WARN, LogRecord from logging.handlers import HTTPHandler -from typing import TYPE_CHECKING, Any, Dict, Optional, TextIO, Tuple +from typing import TYPE_CHECKING, Any, Optional, TextIO # Create logger LOGGER_NAME = "flwr" @@ -119,12 +119,12 @@ def __init__( url: str, method: str = "GET", secure: bool = False, - credentials: Optional[Tuple[str, str]] = None, + credentials: Optional[tuple[str, str]] = None, ) -> None: super().__init__(host, url, method, secure, credentials) self.identifier = identifier - def mapLogRecord(self, record: LogRecord) -> Dict[str, Any]: + def mapLogRecord(self, record: LogRecord) -> dict[str, Any]: """Filter for the properties to be send to the logserver.""" record_dict = record.__dict__ return { diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py index c6142cb18256..57c57eb41bd9 100644 --- a/src/py/flwr/common/message_test.py +++ b/src/py/flwr/common/message_test.py @@ -17,7 +17,7 @@ import time from collections import namedtuple from contextlib import ExitStack -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Optional import pytest @@ -193,7 +193,7 @@ def test_create_reply( ), ], ) -def test_repr(cls: type, kwargs: Dict[str, Any]) -> None: +def test_repr(cls: type, kwargs: dict[str, Any]) -> None: """Test string representations of Metadata/Message/Error.""" # Prepare anon_cls = namedtuple(cls.__qualname__, kwargs.keys()) # type: ignore diff --git a/src/py/flwr/common/object_ref.py b/src/py/flwr/common/object_ref.py index 9723c14037a0..6259b5ab557d 100644 --- a/src/py/flwr/common/object_ref.py +++ b/src/py/flwr/common/object_ref.py @@ -21,7 +21,7 @@ from importlib.util import find_spec from logging import WARN from pathlib import Path -from typing import Any, Optional, Tuple, Type, Union +from typing import Any, Optional, Union from .logger import log @@ -40,7 +40,7 @@ def validate( module_attribute_str: str, check_module: bool = True, project_dir: Optional[Union[str, Path]] = None, -) -> Tuple[bool, Optional[str]]: +) -> tuple[bool, Optional[str]]: """Validate object reference. Parameters @@ -106,7 +106,7 @@ def validate( def load_app( # pylint: disable= too-many-branches module_attribute_str: str, - error_type: Type[Exception], + error_type: type[Exception], project_dir: Optional[Union[str, Path]] = None, ) -> Any: """Return the object specified in a module attribute string. 
diff --git a/src/py/flwr/common/record/configsrecord.py b/src/py/flwr/common/record/configsrecord.py index aeb311089bcd..f570e000cc9b 100644 --- a/src/py/flwr/common/record/configsrecord.py +++ b/src/py/flwr/common/record/configsrecord.py @@ -15,7 +15,7 @@ """ConfigsRecord.""" -from typing import Dict, List, Optional, get_args +from typing import Optional, get_args from flwr.common.typing import ConfigsRecordValues, ConfigsScalar @@ -109,7 +109,7 @@ class ConfigsRecord(TypedDict[str, ConfigsRecordValues]): def __init__( self, - configs_dict: Optional[Dict[str, ConfigsRecordValues]] = None, + configs_dict: Optional[dict[str, ConfigsRecordValues]] = None, keep_input: bool = True, ) -> None: @@ -141,7 +141,7 @@ def get_var_bytes(value: ConfigsScalar) -> int: num_bytes = 0 for k, v in self.items(): - if isinstance(v, List): + if isinstance(v, list): if isinstance(v[0], (bytes, str)): # not all str are of equal length necessarily # for both the footprint of each element is 1 Byte diff --git a/src/py/flwr/common/record/metricsrecord.py b/src/py/flwr/common/record/metricsrecord.py index 868ed82e79ca..d0a6123c807f 100644 --- a/src/py/flwr/common/record/metricsrecord.py +++ b/src/py/flwr/common/record/metricsrecord.py @@ -15,7 +15,7 @@ """MetricsRecord.""" -from typing import Dict, List, Optional, get_args +from typing import Optional, get_args from flwr.common.typing import MetricsRecordValues, MetricsScalar @@ -115,7 +115,7 @@ class MetricsRecord(TypedDict[str, MetricsRecordValues]): def __init__( self, - metrics_dict: Optional[Dict[str, MetricsRecordValues]] = None, + metrics_dict: Optional[dict[str, MetricsRecordValues]] = None, keep_input: bool = True, ): super().__init__(_check_key, _check_value) @@ -130,7 +130,7 @@ def count_bytes(self) -> int: num_bytes = 0 for k, v in self.items(): - if isinstance(v, List): + if isinstance(v, list): # both int and float normally take 4 bytes # But MetricRecords are mapped to 64bit int/float # during protobuffing diff --git a/src/py/flwr/common/record/parametersrecord.py b/src/py/flwr/common/record/parametersrecord.py index f088d682497b..10ec65ca0277 100644 --- a/src/py/flwr/common/record/parametersrecord.py +++ b/src/py/flwr/common/record/parametersrecord.py @@ -14,9 +14,10 @@ # ============================================================================== """ParametersRecord and Array.""" +from collections import OrderedDict from dataclasses import dataclass from io import BytesIO -from typing import List, Optional, OrderedDict, cast +from typing import Optional, cast import numpy as np @@ -51,7 +52,7 @@ class Array: """ dtype: str - shape: List[int] + shape: list[int] stype: str data: bytes diff --git a/src/py/flwr/common/record/parametersrecord_test.py b/src/py/flwr/common/record/parametersrecord_test.py index e840e5e266e4..9ac18a3ec854 100644 --- a/src/py/flwr/common/record/parametersrecord_test.py +++ b/src/py/flwr/common/record/parametersrecord_test.py @@ -17,7 +17,6 @@ import unittest from collections import OrderedDict from io import BytesIO -from typing import List import numpy as np import pytest @@ -81,7 +80,7 @@ def test_numpy_conversion_invalid(self) -> None: ([31, 153], "bool_"), # bool_ is represented as a whole Byte in NumPy ], ) -def test_count_bytes(shape: List[int], dtype: str) -> None: +def test_count_bytes(shape: list[int], dtype: str) -> None: """Test bytes in a ParametersRecord are computed correctly.""" original_array = np.random.randn(*shape).astype(np.dtype(dtype)) diff --git a/src/py/flwr/common/record/recordset.py 
b/src/py/flwr/common/record/recordset.py index f16a22695d6e..b2d1da4411bb 100644 --- a/src/py/flwr/common/record/recordset.py +++ b/src/py/flwr/common/record/recordset.py @@ -119,7 +119,7 @@ class RecordSet: Let's see an example. >>> from flwr.common import RecordSet - >>> from flwr.common import ConfigsRecords, MetricsRecords, ParametersRecord + >>> from flwr.common import ConfigsRecord, MetricsRecord, ParametersRecord >>> >>> # Let's begin with an empty record >>> my_recordset = RecordSet() diff --git a/src/py/flwr/common/record/recordset_test.py b/src/py/flwr/common/record/recordset_test.py index 96556d335f4c..154e320e5f0b 100644 --- a/src/py/flwr/common/record/recordset_test.py +++ b/src/py/flwr/common/record/recordset_test.py @@ -15,9 +15,9 @@ """RecordSet tests.""" import pickle -from collections import namedtuple +from collections import OrderedDict, namedtuple from copy import deepcopy -from typing import Callable, Dict, List, OrderedDict, Type, Union +from typing import Callable, Union import numpy as np import pytest @@ -158,8 +158,8 @@ def test_set_parameters_with_correct_types() -> None: ], ) def test_set_parameters_with_incorrect_types( - key_type: Type[Union[int, str]], - value_fn: Callable[[NDArray], Union[NDArray, List[float]]], + key_type: type[Union[int, str]], + value_fn: Callable[[NDArray], Union[NDArray, list[float]]], ) -> None: """Test adding dictionary of unsupported types to ParametersRecord.""" p_record = ParametersRecord() @@ -183,7 +183,7 @@ def test_set_parameters_with_incorrect_types( ], ) def test_set_metrics_to_metricsrecord_with_correct_types( - key_type: Type[str], + key_type: type[str], value_fn: Callable[[NDArray], MetricsRecordValues], ) -> None: """Test adding metrics of various types to a MetricsRecord.""" @@ -236,8 +236,8 @@ def test_set_metrics_to_metricsrecord_with_correct_types( ], ) def test_set_metrics_to_metricsrecord_with_incorrect_types( - key_type: Type[Union[str, int, float, bool]], - value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], + key_type: type[Union[str, int, float, bool]], + value_fn: Callable[[NDArray], Union[NDArray, dict[str, NDArray], list[float]]], ) -> None: """Test adding metrics of various unsupported types to a MetricsRecord.""" m_record = MetricsRecord() @@ -302,7 +302,7 @@ def test_set_metrics_to_metricsrecord_with_and_without_keeping_input( ], ) def test_set_configs_to_configsrecord_with_correct_types( - key_type: Type[str], + key_type: type[str], value_fn: Callable[[NDArray], ConfigsRecordValues], ) -> None: """Test adding configs of various types to a ConfigsRecord.""" @@ -346,8 +346,8 @@ def test_set_configs_to_configsrecord_with_correct_types( ], ) def test_set_configs_to_configsrecord_with_incorrect_types( - key_type: Type[Union[str, int, float]], - value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], + key_type: type[Union[str, int, float]], + value_fn: Callable[[NDArray], Union[NDArray, dict[str, NDArray], list[float]]], ) -> None: """Test adding configs of various unsupported types to a ConfigsRecord.""" c_record = ConfigsRecord() diff --git a/src/py/flwr/common/record/typeddict.py b/src/py/flwr/common/record/typeddict.py index 791077d8eff2..c2c8548c4de3 100644 --- a/src/py/flwr/common/record/typeddict.py +++ b/src/py/flwr/common/record/typeddict.py @@ -15,7 +15,8 @@ """Typed dict base class for *Records.""" -from typing import Callable, Dict, Generic, Iterator, MutableMapping, TypeVar, cast +from collections.abc import ItemsView, Iterator, KeysView, 
MutableMapping, ValuesView +from typing import Callable, Generic, TypeVar, cast K = TypeVar("K") # Key type V = TypeVar("V") # Value type @@ -38,38 +39,50 @@ def __setitem__(self, key: K, value: V) -> None: cast(Callable[[V], None], self.__dict__["_check_value_fn"])(value) # Set key-value pair - cast(Dict[K, V], self.__dict__["_data"])[key] = value + cast(dict[K, V], self.__dict__["_data"])[key] = value def __delitem__(self, key: K) -> None: """Remove the item with the specified key.""" - del cast(Dict[K, V], self.__dict__["_data"])[key] + del cast(dict[K, V], self.__dict__["_data"])[key] def __getitem__(self, item: K) -> V: """Return the value for the specified key.""" - return cast(Dict[K, V], self.__dict__["_data"])[item] + return cast(dict[K, V], self.__dict__["_data"])[item] def __iter__(self) -> Iterator[K]: """Yield an iterator over the keys of the dictionary.""" - return iter(cast(Dict[K, V], self.__dict__["_data"])) + return iter(cast(dict[K, V], self.__dict__["_data"])) def __repr__(self) -> str: """Return a string representation of the dictionary.""" - return cast(Dict[K, V], self.__dict__["_data"]).__repr__() + return cast(dict[K, V], self.__dict__["_data"]).__repr__() def __len__(self) -> int: """Return the number of items in the dictionary.""" - return len(cast(Dict[K, V], self.__dict__["_data"])) + return len(cast(dict[K, V], self.__dict__["_data"])) def __contains__(self, key: object) -> bool: """Check if the dictionary contains the specified key.""" - return key in cast(Dict[K, V], self.__dict__["_data"]) + return key in cast(dict[K, V], self.__dict__["_data"]) def __eq__(self, other: object) -> bool: """Compare this instance to another dictionary or TypedDict.""" - data = cast(Dict[K, V], self.__dict__["_data"]) + data = cast(dict[K, V], self.__dict__["_data"]) if isinstance(other, TypedDict): - other_data = cast(Dict[K, V], other.__dict__["_data"]) + other_data = cast(dict[K, V], other.__dict__["_data"]) return data == other_data if isinstance(other, dict): return data == other return NotImplemented + + def keys(self) -> KeysView[K]: + """D.keys() -> a set-like object providing a view on D's keys.""" + return cast(dict[K, V], self.__dict__["_data"]).keys() + + def values(self) -> ValuesView[V]: + """D.values() -> an object providing a view on D's values.""" + return cast(dict[K, V], self.__dict__["_data"]).values() + + def items(self) -> ItemsView[K, V]: + """D.items() -> a set-like object providing a view on D's items.""" + return cast(dict[K, V], self.__dict__["_data"]).items() diff --git a/src/py/flwr/common/recordset_compat.py b/src/py/flwr/common/recordset_compat.py index 8bf884c30e58..35024fcd67d1 100644 --- a/src/py/flwr/common/recordset_compat.py +++ b/src/py/flwr/common/recordset_compat.py @@ -15,7 +15,9 @@ """RecordSet utilities.""" -from typing import Dict, Mapping, OrderedDict, Tuple, Union, cast, get_args +from collections import OrderedDict +from collections.abc import Mapping +from typing import Union, cast, get_args from . 
import Array, ConfigsRecord, MetricsRecord, ParametersRecord, RecordSet from .typing import ( @@ -115,7 +117,7 @@ def parameters_to_parametersrecord( def _check_mapping_from_recordscalartype_to_scalar( record_data: Mapping[str, Union[ConfigsRecordValues, MetricsRecordValues]] -) -> Dict[str, Scalar]: +) -> dict[str, Scalar]: """Check mapping `common.*RecordValues` into `common.Scalar` is possible.""" for value in record_data.values(): if not isinstance(value, get_args(Scalar)): @@ -126,14 +128,14 @@ def _check_mapping_from_recordscalartype_to_scalar( "supported by the `common.RecordSet` infrastructure. " f"You used type: {type(value)}" ) - return cast(Dict[str, Scalar], record_data) + return cast(dict[str, Scalar], record_data) def _recordset_to_fit_or_evaluate_ins_components( recordset: RecordSet, ins_str: str, keep_input: bool, -) -> Tuple[Parameters, Dict[str, Scalar]]: +) -> tuple[Parameters, dict[str, Scalar]]: """Derive Fit/Evaluate Ins from a RecordSet.""" # get Array and construct Parameters parameters_record = recordset.parameters_records[f"{ins_str}.parameters"] @@ -169,7 +171,7 @@ def _fit_or_evaluate_ins_to_recordset( def _embed_status_into_recordset( res_str: str, status: Status, recordset: RecordSet ) -> RecordSet: - status_dict: Dict[str, ConfigsRecordValues] = { + status_dict: dict[str, ConfigsRecordValues] = { "code": int(status.code.value), "message": status.message, } diff --git a/src/py/flwr/common/recordset_compat_test.py b/src/py/flwr/common/recordset_compat_test.py index e0ac7f216af9..05d821e37e40 100644 --- a/src/py/flwr/common/recordset_compat_test.py +++ b/src/py/flwr/common/recordset_compat_test.py @@ -15,7 +15,7 @@ """RecordSet from legacy messages tests.""" from copy import deepcopy -from typing import Callable, Dict +from typing import Callable import numpy as np import pytest @@ -82,7 +82,7 @@ def _get_valid_fitins_with_empty_ndarrays() -> FitIns: def _get_valid_fitres() -> FitRes: """Returnn Valid parameters but potentially invalid config.""" arrays = get_ndarrays() - metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + metrics: dict[str, Scalar] = {"a": 1.0, "b": 0} return FitRes( parameters=ndarrays_to_parameters(arrays), num_examples=1, @@ -98,7 +98,7 @@ def _get_valid_evaluateins() -> EvaluateIns: def _get_valid_evaluateres() -> EvaluateRes: """Return potentially invalid config.""" - metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + metrics: dict[str, Scalar] = {"a": 1.0, "b": 0} return EvaluateRes( num_examples=1, loss=0.1, @@ -108,7 +108,7 @@ def _get_valid_evaluateres() -> EvaluateRes: def _get_valid_getparametersins() -> GetParametersIns: - config_dict: Dict[str, Scalar] = { + config_dict: dict[str, Scalar] = { "a": 1.0, "b": 3, "c": True, @@ -131,7 +131,7 @@ def _get_valid_getpropertiesins() -> GetPropertiesIns: def _get_valid_getpropertiesres() -> GetPropertiesRes: - config_dict: Dict[str, Scalar] = { + config_dict: dict[str, Scalar] = { "a": 1.0, "b": 3, "c": True, diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py index d12124b89840..303d5596f237 100644 --- a/src/py/flwr/common/retry_invoker.py +++ b/src/py/flwr/common/retry_invoker.py @@ -18,20 +18,9 @@ import itertools import random import time +from collections.abc import Generator, Iterable from dataclasses import dataclass -from typing import ( - Any, - Callable, - Dict, - Generator, - Iterable, - List, - Optional, - Tuple, - Type, - Union, - cast, -) +from typing import Any, Callable, Optional, Union, cast def exponential( @@ -93,8 +82,8 @@ class RetryState: 
"""State for callbacks in RetryInvoker.""" target: Callable[..., Any] - args: Tuple[Any, ...] - kwargs: Dict[str, Any] + args: tuple[Any, ...] + kwargs: dict[str, Any] tries: int elapsed_time: float exception: Optional[Exception] = None @@ -167,7 +156,7 @@ class RetryInvoker: def __init__( self, wait_gen_factory: Callable[[], Generator[float, None, None]], - recoverable_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]], + recoverable_exceptions: Union[type[Exception], tuple[type[Exception], ...]], max_tries: Optional[int], max_time: Optional[float], *, @@ -244,7 +233,7 @@ def try_call_event_handler( try_cnt = 0 wait_generator = self.wait_gen_factory() start = time.monotonic() - ref_state: List[Optional[RetryState]] = [None] + ref_state: list[Optional[RetryState]] = [None] while True: try_cnt += 1 diff --git a/src/py/flwr/common/retry_invoker_test.py b/src/py/flwr/common/retry_invoker_test.py index 2259ae47ded4..a9f2625ff443 100644 --- a/src/py/flwr/common/retry_invoker_test.py +++ b/src/py/flwr/common/retry_invoker_test.py @@ -15,7 +15,7 @@ """Tests for `RetryInvoker`.""" -from typing import Generator +from collections.abc import Generator from unittest.mock import MagicMock, Mock, patch import pytest diff --git a/src/py/flwr/common/secure_aggregation/crypto/shamir.py b/src/py/flwr/common/secure_aggregation/crypto/shamir.py index 688bfa2153ea..9c7e67abf94f 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/shamir.py +++ b/src/py/flwr/common/secure_aggregation/crypto/shamir.py @@ -17,20 +17,20 @@ import pickle from concurrent.futures import ThreadPoolExecutor -from typing import List, Tuple, cast +from typing import cast from Crypto.Protocol.SecretSharing import Shamir from Crypto.Util.Padding import pad, unpad -def create_shares(secret: bytes, threshold: int, num: int) -> List[bytes]: +def create_shares(secret: bytes, threshold: int, num: int) -> list[bytes]: """Return list of shares (bytes).""" secret_padded = pad(secret, 16) secret_padded_chunk = [ (threshold, num, secret_padded[i : i + 16]) for i in range(0, len(secret_padded), 16) ] - share_list: List[List[Tuple[int, bytes]]] = [[] for _ in range(num)] + share_list: list[list[tuple[int, bytes]]] = [[] for _ in range(num)] with ThreadPoolExecutor(max_workers=10) as executor: for chunk_shares in executor.map( @@ -43,22 +43,22 @@ def create_shares(secret: bytes, threshold: int, num: int) -> List[bytes]: return [pickle.dumps(shares) for shares in share_list] -def _shamir_split(threshold: int, num: int, chunk: bytes) -> List[Tuple[int, bytes]]: +def _shamir_split(threshold: int, num: int, chunk: bytes) -> list[tuple[int, bytes]]: return Shamir.split(threshold, num, chunk, ssss=False) # Reconstructing secret with PyCryptodome -def combine_shares(share_list: List[bytes]) -> bytes: +def combine_shares(share_list: list[bytes]) -> bytes: """Reconstruct secret from shares.""" - unpickled_share_list: List[List[Tuple[int, bytes]]] = [ - cast(List[Tuple[int, bytes]], pickle.loads(share)) for share in share_list + unpickled_share_list: list[list[tuple[int, bytes]]] = [ + cast(list[tuple[int, bytes]], pickle.loads(share)) for share in share_list ] chunk_num = len(unpickled_share_list[0]) secret_padded = bytearray(0) - chunk_shares_list: List[List[Tuple[int, bytes]]] = [] + chunk_shares_list: list[list[tuple[int, bytes]]] = [] for i in range(chunk_num): - chunk_shares: List[Tuple[int, bytes]] = [] + chunk_shares: list[tuple[int, bytes]] = [] for share in unpickled_share_list: chunk_shares.append(share[i]) 
chunk_shares_list.append(chunk_shares) @@ -71,5 +71,5 @@ def combine_shares(share_list: List[bytes]) -> bytes: return bytes(secret) -def _shamir_combine(shares: List[Tuple[int, bytes]]) -> bytes: +def _shamir_combine(shares: list[tuple[int, bytes]]) -> bytes: return Shamir.combine(shares, ssss=False) diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py index 59ca84d604b8..f5c130fb2663 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py @@ -16,7 +16,7 @@ import base64 -from typing import Tuple, cast +from typing import cast from cryptography.exceptions import InvalidSignature from cryptography.fernet import Fernet @@ -26,7 +26,7 @@ def generate_key_pairs() -> ( - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ): """Generate private and public key pairs with Cryptography.""" private_key = ec.generate_private_key(ec.SECP384R1()) diff --git a/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py b/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py index 207c15b61518..3197fd852f3d 100644 --- a/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py +++ b/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py @@ -15,51 +15,51 @@ """Utility functions for performing operations on Numpy NDArrays.""" -from typing import Any, List, Tuple, Union +from typing import Any, Union import numpy as np from numpy.typing import DTypeLike, NDArray -def factor_combine(factor: int, parameters: List[NDArray[Any]]) -> List[NDArray[Any]]: +def factor_combine(factor: int, parameters: list[NDArray[Any]]) -> list[NDArray[Any]]: """Combine factor with parameters.""" return [np.array([factor])] + parameters def factor_extract( - parameters: List[NDArray[Any]], -) -> Tuple[int, List[NDArray[Any]]]: + parameters: list[NDArray[Any]], +) -> tuple[int, list[NDArray[Any]]]: """Extract factor from parameters.""" return parameters[0][0], parameters[1:] -def get_parameters_shape(parameters: List[NDArray[Any]]) -> List[Tuple[int, ...]]: +def get_parameters_shape(parameters: list[NDArray[Any]]) -> list[tuple[int, ...]]: """Get dimensions of each NDArray in parameters.""" return [arr.shape for arr in parameters] def get_zero_parameters( - dimensions_list: List[Tuple[int, ...]], dtype: DTypeLike = np.int64 -) -> List[NDArray[Any]]: + dimensions_list: list[tuple[int, ...]], dtype: DTypeLike = np.int64 +) -> list[NDArray[Any]]: """Generate zero parameters based on the dimensions list.""" return [np.zeros(dimensions, dtype=dtype) for dimensions in dimensions_list] def parameters_addition( - parameters1: List[NDArray[Any]], parameters2: List[NDArray[Any]] -) -> List[NDArray[Any]]: + parameters1: list[NDArray[Any]], parameters2: list[NDArray[Any]] +) -> list[NDArray[Any]]: """Add two parameters.""" return [parameters1[idx] + parameters2[idx] for idx in range(len(parameters1))] def parameters_subtraction( - parameters1: List[NDArray[Any]], parameters2: List[NDArray[Any]] -) -> List[NDArray[Any]]: + parameters1: list[NDArray[Any]], parameters2: list[NDArray[Any]] +) -> list[NDArray[Any]]: """Subtract parameters from the other parameters.""" return [parameters1[idx] - parameters2[idx] for idx in range(len(parameters1))] -def parameters_mod(parameters: List[NDArray[Any]], divisor: int) -> List[NDArray[Any]]: +def 
parameters_mod(parameters: list[NDArray[Any]], divisor: int) -> list[NDArray[Any]]: """Take mod of parameters with an integer divisor.""" if bin(divisor).count("1") == 1: msk = divisor - 1 @@ -68,14 +68,14 @@ def parameters_mod(parameters: List[NDArray[Any]], divisor: int) -> List[NDArray def parameters_multiply( - parameters: List[NDArray[Any]], multiplier: Union[int, float] -) -> List[NDArray[Any]]: + parameters: list[NDArray[Any]], multiplier: Union[int, float] +) -> list[NDArray[Any]]: """Multiply parameters by an integer/float multiplier.""" return [parameters[idx] * multiplier for idx in range(len(parameters))] def parameters_divide( - parameters: List[NDArray[Any]], divisor: Union[int, float] -) -> List[NDArray[Any]]: + parameters: list[NDArray[Any]], divisor: Union[int, float] +) -> list[NDArray[Any]]: """Divide weight by an integer/float divisor.""" return [parameters[idx] / divisor for idx in range(len(parameters))] diff --git a/src/py/flwr/common/secure_aggregation/quantization.py b/src/py/flwr/common/secure_aggregation/quantization.py index 7946276b6a4f..ab8521eed981 100644 --- a/src/py/flwr/common/secure_aggregation/quantization.py +++ b/src/py/flwr/common/secure_aggregation/quantization.py @@ -15,7 +15,7 @@ """Utility functions for model quantization.""" -from typing import List, cast +from typing import cast import numpy as np @@ -30,10 +30,10 @@ def _stochastic_round(arr: NDArrayFloat) -> NDArrayInt: def quantize( - parameters: List[NDArrayFloat], clipping_range: float, target_range: int -) -> List[NDArrayInt]: + parameters: list[NDArrayFloat], clipping_range: float, target_range: int +) -> list[NDArrayInt]: """Quantize float Numpy arrays to integer Numpy arrays.""" - quantized_list: List[NDArrayInt] = [] + quantized_list: list[NDArrayInt] = [] quantizer = target_range / (2 * clipping_range) for arr in parameters: # Stochastic quantization @@ -49,12 +49,12 @@ def quantize( # Dequantize parameters to range [-clipping_range, clipping_range] def dequantize( - quantized_parameters: List[NDArrayInt], + quantized_parameters: list[NDArrayInt], clipping_range: float, target_range: int, -) -> List[NDArrayFloat]: +) -> list[NDArrayFloat]: """Dequantize integer Numpy arrays to float Numpy arrays.""" - reverse_quantized_list: List[NDArrayFloat] = [] + reverse_quantized_list: list[NDArrayFloat] = [] quantizer = (2 * clipping_range) / target_range shift = -clipping_range for arr in quantized_parameters: diff --git a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py index cf6ac3bfb003..7bfb80f57891 100644 --- a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py +++ b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py @@ -15,8 +15,6 @@ """Utility functions for the SecAgg/SecAgg+ protocol.""" -from typing import List, Tuple - import numpy as np from flwr.common.typing import NDArrayInt @@ -54,7 +52,7 @@ def share_keys_plaintext_concat( ) -def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, bytes]: +def share_keys_plaintext_separate(plaintext: bytes) -> tuple[int, int, bytes, bytes]: """Retrieve arguments from bytes. 
Parameters @@ -83,8 +81,8 @@ def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, by def pseudo_rand_gen( - seed: bytes, num_range: int, dimensions_list: List[Tuple[int, ...]] -) -> List[NDArrayInt]: + seed: bytes, num_range: int, dimensions_list: list[tuple[int, ...]] +) -> list[NDArrayInt]: """Seeded pseudo-random number generator for noise generation with Numpy.""" assert len(seed) & 0x3 == 0 seed32 = 0 diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 76265b9836d1..87e01b05d341 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -15,7 +15,9 @@ """ProtoBuf serialization and deserialization.""" -from typing import Any, Dict, List, MutableMapping, OrderedDict, Type, TypeVar, cast +from collections import OrderedDict +from collections.abc import MutableMapping +from typing import Any, TypeVar, cast from google.protobuf.message import Message as GrpcMessage @@ -72,7 +74,7 @@ def parameters_to_proto(parameters: typing.Parameters) -> Parameters: def parameters_from_proto(msg: Parameters) -> typing.Parameters: """Deserialize `Parameters` from ProtoBuf.""" - tensors: List[bytes] = list(msg.tensors) + tensors: list[bytes] = list(msg.tensors) return typing.Parameters(tensors=tensors, tensor_type=msg.tensor_type) @@ -390,7 +392,7 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: def _record_value_to_proto( - value: Any, allowed_types: List[type], proto_class: Type[T] + value: Any, allowed_types: list[type], proto_class: type[T] ) -> T: """Serialize `*RecordValue` to ProtoBuf. @@ -427,9 +429,9 @@ def _record_value_from_proto(value_proto: GrpcMessage) -> Any: def _record_value_dict_to_proto( value_dict: TypedDict[str, Any], - allowed_types: List[type], - value_proto_class: Type[T], -) -> Dict[str, T]: + allowed_types: list[type], + value_proto_class: type[T], +) -> dict[str, T]: """Serialize the record value dict to ProtoBuf. Note: `bool` MUST be put in the front of allowd_types if it exists. 
@@ -447,7 +449,7 @@ def proto(_v: Any) -> T: def _record_value_dict_from_proto( value_dict_proto: MutableMapping[str, Any] -) -> Dict[str, Any]: +) -> dict[str, Any]: """Deserialize the record value dict from ProtoBuf.""" return {k: _record_value_from_proto(v) for k, v in value_dict_proto.items()} @@ -498,7 +500,7 @@ def metrics_record_from_proto(record_proto: ProtoMetricsRecord) -> MetricsRecord """Deserialize MetricsRecord from ProtoBuf.""" return MetricsRecord( metrics_dict=cast( - Dict[str, typing.MetricsRecordValues], + dict[str, typing.MetricsRecordValues], _record_value_dict_from_proto(record_proto.data), ), keep_input=False, @@ -520,7 +522,7 @@ def configs_record_from_proto(record_proto: ProtoConfigsRecord) -> ConfigsRecord """Deserialize ConfigsRecord from ProtoBuf.""" return ConfigsRecord( configs_dict=cast( - Dict[str, typing.ConfigsRecordValues], + dict[str, typing.ConfigsRecordValues], _record_value_dict_from_proto(record_proto.data), ), keep_input=False, diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index 013d04a32fd4..49d1e38fa897 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -16,7 +16,8 @@ import random import string -from typing import Any, Callable, Optional, OrderedDict, Type, TypeVar, Union, cast +from collections import OrderedDict +from typing import Any, Callable, Optional, TypeVar, Union, cast import pytest @@ -169,7 +170,7 @@ def get_str(self, length: Optional[int] = None) -> str: length = self.rng.randint(1, 10) return "".join(self.rng.choices(char_pool, k=length)) - def get_value(self, dtype: Type[T]) -> T: + def get_value(self, dtype: type[T]) -> T: """Create a value of a given type.""" ret: Any = None if dtype == bool: diff --git a/src/py/flwr/common/telemetry.py b/src/py/flwr/common/telemetry.py index 399f400b7edc..724f36d2b98f 100644 --- a/src/py/flwr/common/telemetry.py +++ b/src/py/flwr/common/telemetry.py @@ -25,7 +25,7 @@ from concurrent.futures import Future, ThreadPoolExecutor from enum import Enum, auto from pathlib import Path -from typing import Any, Dict, List, Optional, Union, cast +from typing import Any, Optional, Union, cast from flwr.common.version import package_name, package_version @@ -126,64 +126,70 @@ class EventType(str, Enum): # The type signature is not compatible with mypy, pylint and flake8 # so each of those needs to be disabled for this line. 
# pylint: disable-next=no-self-argument,arguments-differ,line-too-long - def _generate_next_value_(name: str, start: int, count: int, last_values: List[Any]) -> Any: # type: ignore # noqa: E501 + def _generate_next_value_(name: str, start: int, count: int, last_values: list[Any]) -> Any: # type: ignore # noqa: E501 return name # Ping PING = auto() - # Client: start_client + # --- LEGACY FUNCTIONS ------------------------------------------------------------- + + # Legacy: `start_client` function START_CLIENT_ENTER = auto() START_CLIENT_LEAVE = auto() - # Server: start_server + # Legacy: `start_server` function START_SERVER_ENTER = auto() START_SERVER_LEAVE = auto() - # Driver API - RUN_DRIVER_API_ENTER = auto() - RUN_DRIVER_API_LEAVE = auto() + # Legacy: `start_simulation` function + START_SIMULATION_ENTER = auto() + START_SIMULATION_LEAVE = auto() - # Fleet API - RUN_FLEET_API_ENTER = auto() - RUN_FLEET_API_LEAVE = auto() + # --- `flwr` CLI ------------------------------------------------------------------- - # Driver API and Fleet API - RUN_SUPERLINK_ENTER = auto() - RUN_SUPERLINK_LEAVE = auto() + # Not yet implemented - # Simulation - START_SIMULATION_ENTER = auto() - START_SIMULATION_LEAVE = auto() + # --- SuperExec -------------------------------------------------------------------- - # Driver: Driver - DRIVER_CONNECT = auto() - DRIVER_DISCONNECT = auto() + # SuperExec + RUN_SUPEREXEC_ENTER = auto() + RUN_SUPEREXEC_LEAVE = auto() - # Driver: start_driver - START_DRIVER_ENTER = auto() - START_DRIVER_LEAVE = auto() + # --- Simulation Engine ------------------------------------------------------------ - # flower-client-app - RUN_CLIENT_APP_ENTER = auto() - RUN_CLIENT_APP_LEAVE = auto() + # CLI: flower-simulation + CLI_FLOWER_SIMULATION_ENTER = auto() + CLI_FLOWER_SIMULATION_LEAVE = auto() - # flower-server-app - RUN_SERVER_APP_ENTER = auto() - RUN_SERVER_APP_LEAVE = auto() + # Python API: `run_simulation` + PYTHON_API_RUN_SIMULATION_ENTER = auto() + PYTHON_API_RUN_SIMULATION_LEAVE = auto() - # SuperNode + # --- Deployment Engine ------------------------------------------------------------ + + # CLI: `flower-superlink` + RUN_SUPERLINK_ENTER = auto() + RUN_SUPERLINK_LEAVE = auto() + + # CLI: `flower-supernode` RUN_SUPERNODE_ENTER = auto() RUN_SUPERNODE_LEAVE = auto() - # SuperExec - RUN_SUPEREXEC_ENTER = auto() - RUN_SUPEREXEC_LEAVE = auto() + # CLI: `flower-server-app` + RUN_SERVER_APP_ENTER = auto() + RUN_SERVER_APP_LEAVE = auto() + + # --- DEPRECATED ------------------------------------------------------------------- + + # [DEPRECATED] CLI: `flower-client-app` + RUN_CLIENT_APP_ENTER = auto() + RUN_CLIENT_APP_LEAVE = auto() # Use the ThreadPoolExecutor with max_workers=1 to have a queue # and also ensure that telemetry calls are not blocking. 
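
The `EventType` members above are regrouped by entry point (legacy functions, the `flwr` CLI, SuperExec, the Simulation Engine, the Deployment Engine, and deprecated CLIs); how events are emitted does not change. A short usage sketch, assuming the `event` helper defined further down in this module:

# Sketch only: emit a telemetry event; `event` returns a Future, so the caller
# is not blocked while the event is created on the single worker thread.
from flwr.common.telemetry import EventType, event

future = event(EventType.RUN_SUPERLINK_ENTER)
future.result()  # optional: wait until the event has been created
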
-state: Dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { +state: dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { # Will be assigned ThreadPoolExecutor(max_workers=1) # in event() the first time it's required "executor": None, @@ -195,7 +201,7 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A def event( event_type: EventType, - event_details: Optional[Dict[str, Any]] = None, + event_details: Optional[dict[str, Any]] = None, ) -> Future: # type: ignore """Submit create_event to ThreadPoolExecutor to avoid blocking.""" if state["executor"] is None: @@ -207,7 +213,7 @@ def event( return result -def create_event(event_type: EventType, event_details: Optional[Dict[str, Any]]) -> str: +def create_event(event_type: EventType, event_details: Optional[dict[str, Any]]) -> str: """Create telemetry event.""" if state["source"] is None: state["source"] = _get_source_id() diff --git a/src/py/flwr/common/typing.py b/src/py/flwr/common/typing.py index b1dec8d0420b..081a957f28ff 100644 --- a/src/py/flwr/common/typing.py +++ b/src/py/flwr/common/typing.py @@ -17,7 +17,7 @@ from dataclasses import dataclass from enum import Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union import numpy as np import numpy.typing as npt @@ -25,7 +25,7 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] NDArrayFloat = npt.NDArray[np.float_] -NDArrays = List[NDArray] +NDArrays = list[NDArray] # The following union type contains Python types corresponding to ProtoBuf types that # ProtoBuf considers to be "Scalar Value Types", even though some of them arguably do @@ -38,31 +38,31 @@ float, int, str, - List[bool], - List[bytes], - List[float], - List[int], - List[str], + list[bool], + list[bytes], + list[float], + list[int], + list[str], ] # Value types for common.MetricsRecord MetricsScalar = Union[int, float] -MetricsScalarList = Union[List[int], List[float]] +MetricsScalarList = Union[list[int], list[float]] MetricsRecordValues = Union[MetricsScalar, MetricsScalarList] # Value types for common.ConfigsRecord ConfigsScalar = Union[MetricsScalar, str, bytes, bool] -ConfigsScalarList = Union[MetricsScalarList, List[str], List[bytes], List[bool]] +ConfigsScalarList = Union[MetricsScalarList, list[str], list[bytes], list[bool]] ConfigsRecordValues = Union[ConfigsScalar, ConfigsScalarList] -Metrics = Dict[str, Scalar] -MetricsAggregationFn = Callable[[List[Tuple[int, Metrics]]], Metrics] +Metrics = dict[str, Scalar] +MetricsAggregationFn = Callable[[list[tuple[int, Metrics]]], Metrics] -Config = Dict[str, Scalar] -Properties = Dict[str, Scalar] +Config = dict[str, Scalar] +Properties = dict[str, Scalar] # Value type for user configs UserConfigValue = Union[bool, float, int, str] -UserConfig = Dict[str, UserConfigValue] +UserConfig = dict[str, UserConfigValue] class Code(Enum): @@ -103,7 +103,7 @@ class ClientAppOutputStatus: class Parameters: """Model parameters.""" - tensors: List[bytes] + tensors: list[bytes] tensor_type: str @@ -127,7 +127,7 @@ class FitIns: """Fit instructions for a client.""" parameters: Parameters - config: Dict[str, Scalar] + config: dict[str, Scalar] @dataclass @@ -137,7 +137,7 @@ class FitRes: status: Status parameters: Parameters num_examples: int - metrics: Dict[str, Scalar] + metrics: dict[str, Scalar] @dataclass @@ -145,7 +145,7 @@ class EvaluateIns: """Evaluate instructions for a client.""" parameters: Parameters - config: Dict[str, Scalar] + config: 
dict[str, Scalar] @dataclass @@ -155,7 +155,7 @@ class EvaluateRes: status: Status loss: float num_examples: int - metrics: Dict[str, Scalar] + metrics: dict[str, Scalar] @dataclass diff --git a/src/py/flwr/common/version.py b/src/py/flwr/common/version.py index ac13f70d8a88..141c16ac9367 100644 --- a/src/py/flwr/common/version.py +++ b/src/py/flwr/common/version.py @@ -15,15 +15,14 @@ """Flower package version helper.""" import importlib.metadata as importlib_metadata -from typing import Tuple -def _check_package(name: str) -> Tuple[str, str]: +def _check_package(name: str) -> tuple[str, str]: version: str = importlib_metadata.version(name) return name, version -def _version() -> Tuple[str, str]: +def _version() -> tuple[str, str]: """Read and return Flower package name and version. Returns diff --git a/src/py/flwr/proto/clientappio_pb2.py b/src/py/flwr/proto/clientappio_pb2.py index 9fd5302fe6cd..3fdc9f8a6ece 100644 --- a/src/py/flwr/proto/clientappio_pb2.py +++ b/src/py/flwr/proto/clientappio_pb2.py @@ -17,7 +17,7 @@ from flwr.proto import message_pb2 as flwr_dot_proto_dot_message__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lwr/proto/clientappio.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x18\x66lwr/proto/message.proto\"W\n\x15\x43lientAppOutputStatus\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.flwr.proto.ClientAppOutputCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x11\n\x0fGetTokenRequest\"!\n\x10GetTokenResponse\x12\r\n\x05token\x18\x01 \x01(\x12\"+\n\x1aPullClientAppInputsRequest\x12\r\n\x05token\x18\x01 \x01(\x12\"\xa5\x01\n\x1bPullClientAppInputsResponse\x12$\n\x07message\x18\x01 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Context\x12\x1c\n\x03run\x18\x03 \x01(\x0b\x32\x0f.flwr.proto.Run\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\"x\n\x1bPushClientAppOutputsRequest\x12\r\n\x05token\x18\x01 \x01(\x12\x12$\n\x07message\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x03 \x01(\x0b\x32\x13.flwr.proto.Context\"Q\n\x1cPushClientAppOutputsResponse\x12\x31\n\x06status\x18\x01 \x01(\x0b\x32!.flwr.proto.ClientAppOutputStatus*L\n\x13\x43lientAppOutputCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x32\xad\x02\n\x0b\x43lientAppIo\x12G\n\x08GetToken\x12\x1b.flwr.proto.GetTokenRequest\x1a\x1c.flwr.proto.GetTokenResponse\"\x00\x12h\n\x13PullClientAppInputs\x12&.flwr.proto.PullClientAppInputsRequest\x1a\'.flwr.proto.PullClientAppInputsResponse\"\x00\x12k\n\x14PushClientAppOutputs\x12\'.flwr.proto.PushClientAppOutputsRequest\x1a(.flwr.proto.PushClientAppOutputsResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lwr/proto/clientappio.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x18\x66lwr/proto/message.proto\"W\n\x15\x43lientAppOutputStatus\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.flwr.proto.ClientAppOutputCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x11\n\x0fGetTokenRequest\"!\n\x10GetTokenResponse\x12\r\n\x05token\x18\x01 \x01(\x04\"+\n\x1aPullClientAppInputsRequest\x12\r\n\x05token\x18\x01 \x01(\x04\"\xa5\x01\n\x1bPullClientAppInputsResponse\x12$\n\x07message\x18\x01 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Context\x12\x1c\n\x03run\x18\x03 \x01(\x0b\x32\x0f.flwr.proto.Run\x12\x1c\n\x03\x66\x61\x62\x18\x04 
\x01(\x0b\x32\x0f.flwr.proto.Fab\"x\n\x1bPushClientAppOutputsRequest\x12\r\n\x05token\x18\x01 \x01(\x04\x12$\n\x07message\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x03 \x01(\x0b\x32\x13.flwr.proto.Context\"Q\n\x1cPushClientAppOutputsResponse\x12\x31\n\x06status\x18\x01 \x01(\x0b\x32!.flwr.proto.ClientAppOutputStatus*L\n\x13\x43lientAppOutputCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x32\xad\x02\n\x0b\x43lientAppIo\x12G\n\x08GetToken\x12\x1b.flwr.proto.GetTokenRequest\x1a\x1c.flwr.proto.GetTokenResponse\"\x00\x12h\n\x13PullClientAppInputs\x12&.flwr.proto.PullClientAppInputsRequest\x1a\'.flwr.proto.PullClientAppInputsResponse\"\x00\x12k\n\x14PushClientAppOutputs\x12\'.flwr.proto.PushClientAppOutputsRequest\x1a(.flwr.proto.PushClientAppOutputsResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/control_pb2.py b/src/py/flwr/proto/control_pb2.py new file mode 100644 index 000000000000..eb1c18d8dcff --- /dev/null +++ b/src/py/flwr/proto/control_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flwr/proto/control.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/control.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/run.proto2\x88\x02\n\x07\x43ontrol\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12S\n\x0cGetRunStatus\x12\x1f.flwr.proto.GetRunStatusRequest\x1a .flwr.proto.GetRunStatusResponse\"\x00\x12\\\n\x0fUpdateRunStatus\x12\".flwr.proto.UpdateRunStatusRequest\x1a#.flwr.proto.UpdateRunStatusResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.control_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_CONTROL']._serialized_start=63 + _globals['_CONTROL']._serialized_end=327 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/control_pb2.pyi b/src/py/flwr/proto/control_pb2.pyi new file mode 100644 index 000000000000..e08fa11c2caa --- /dev/null +++ b/src/py/flwr/proto/control_pb2.pyi @@ -0,0 +1,7 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import google.protobuf.descriptor + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor diff --git a/src/py/flwr/proto/control_pb2_grpc.py b/src/py/flwr/proto/control_pb2_grpc.py new file mode 100644 index 000000000000..a59f90f15935 --- /dev/null +++ b/src/py/flwr/proto/control_pb2_grpc.py @@ -0,0 +1,135 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 + + +class ControlStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateRun = channel.unary_unary( + '/flwr.proto.Control/CreateRun', + request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + ) + self.GetRunStatus = channel.unary_unary( + '/flwr.proto.Control/GetRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.FromString, + ) + self.UpdateRunStatus = channel.unary_unary( + '/flwr.proto.Control/UpdateRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + ) + + +class ControlServicer(object): + """Missing associated documentation comment in .proto file.""" + + def CreateRun(self, request, context): + """Request to create a new run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetRunStatus(self, request, context): + """Get the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateRunStatus(self, request, context): + """Update the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ControlServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateRun': grpc.unary_unary_rpc_method_handler( + servicer.CreateRun, + request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, + ), + 'GetRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.GetRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.SerializeToString, + ), + 'UpdateRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.UpdateRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flwr.proto.Control', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class Control(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def CreateRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/CreateRun', + flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/GetRunStatus', + flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def UpdateRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/UpdateRunStatus', + flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/control_pb2_grpc.pyi b/src/py/flwr/proto/control_pb2_grpc.pyi new file mode 100644 index 000000000000..7817e2b12e31 --- /dev/null +++ b/src/py/flwr/proto/control_pb2_grpc.pyi @@ -0,0 +1,53 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import abc +import flwr.proto.run_pb2 +import grpc + +class ControlStub: + def __init__(self, channel: grpc.Channel) -> None: ... + CreateRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.CreateRunRequest, + flwr.proto.run_pb2.CreateRunResponse] + """Request to create a new run""" + + GetRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.GetRunStatusRequest, + flwr.proto.run_pb2.GetRunStatusResponse] + """Get the status of a given run""" + + UpdateRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.UpdateRunStatusRequest, + flwr.proto.run_pb2.UpdateRunStatusResponse] + """Update the status of a given run""" + + +class ControlServicer(metaclass=abc.ABCMeta): + @abc.abstractmethod + def CreateRun(self, + request: flwr.proto.run_pb2.CreateRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.CreateRunResponse: + """Request to create a new run""" + pass + + @abc.abstractmethod + def GetRunStatus(self, + request: flwr.proto.run_pb2.GetRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.GetRunStatusResponse: + """Get the status of a given run""" + pass + + @abc.abstractmethod + def UpdateRunStatus(self, + request: flwr.proto.run_pb2.UpdateRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.UpdateRunStatusResponse: + """Update the status of a given run""" + pass + + +def add_ControlServicer_to_server(servicer: ControlServicer, server: grpc.Server) -> None: ... 
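
The new `Control` service defined above reuses the `CreateRun*`, `GetRunStatus*`, and `UpdateRunStatus*` messages from `run_pb2`. A hedged sketch of registering a servicer on a gRPC server; the in-memory run counter and the ephemeral port are illustrative, and only `CreateRun` is overridden (the other methods fall back to the generated UNIMPLEMENTED handlers):

# Sketch only: serve the Control service with a toy in-memory servicer.
from concurrent.futures import ThreadPoolExecutor

import grpc

from flwr.proto import run_pb2
from flwr.proto.control_pb2_grpc import ControlServicer, add_ControlServicer_to_server


class InMemoryControlServicer(ControlServicer):
    """Hand out incrementing run IDs; for illustration only."""

    def __init__(self) -> None:
        self._next_run_id = 1

    def CreateRun(self, request, context):  # request: run_pb2.CreateRunRequest
        run_id = self._next_run_id
        self._next_run_id += 1
        return run_pb2.CreateRunResponse(run_id=run_id)


server = grpc.server(ThreadPoolExecutor(max_workers=1))
add_ControlServicer_to_server(InMemoryControlServicer(), server)
server.add_insecure_port("127.0.0.1:0")  # ephemeral port, illustration only
server.start()
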
diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index dde72620f5bf..d294b03be5af 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -16,36 +16,27 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 -from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 \x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc7\x03\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc7\x03\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.driver_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._options = None - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' - _globals['_CREATERUNREQUEST']._serialized_start=158 - _globals['_CREATERUNREQUEST']._serialized_end=393 - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=320 - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=393 - _globals['_CREATERUNRESPONSE']._serialized_start=395 - _globals['_CREATERUNRESPONSE']._serialized_end=430 - _globals['_GETNODESREQUEST']._serialized_start=432 - _globals['_GETNODESREQUEST']._serialized_end=465 - _globals['_GETNODESRESPONSE']._serialized_start=467 - _globals['_GETNODESRESPONSE']._serialized_end=518 - _globals['_PUSHTASKINSREQUEST']._serialized_start=520 - _globals['_PUSHTASKINSREQUEST']._serialized_end=584 - _globals['_PUSHTASKINSRESPONSE']._serialized_start=586 - _globals['_PUSHTASKINSRESPONSE']._serialized_end=625 - _globals['_PULLTASKRESREQUEST']._serialized_start=627 - _globals['_PULLTASKRESREQUEST']._serialized_end=697 - _globals['_PULLTASKRESRESPONSE']._serialized_start=699 - _globals['_PULLTASKRESRESPONSE']._serialized_end=764 - _globals['_DRIVER']._serialized_start=767 - _globals['_DRIVER']._serialized_end=1222 + _globals['_GETNODESREQUEST']._serialized_start=129 + _globals['_GETNODESREQUEST']._serialized_end=162 + _globals['_GETNODESRESPONSE']._serialized_start=164 + _globals['_GETNODESRESPONSE']._serialized_end=215 + _globals['_PUSHTASKINSREQUEST']._serialized_start=217 + _globals['_PUSHTASKINSREQUEST']._serialized_end=281 + _globals['_PUSHTASKINSRESPONSE']._serialized_start=283 + _globals['_PUSHTASKINSRESPONSE']._serialized_end=322 + _globals['_PULLTASKRESREQUEST']._serialized_start=324 + _globals['_PULLTASKRESREQUEST']._serialized_end=394 + _globals['_PULLTASKRESRESPONSE']._serialized_start=396 + _globals['_PULLTASKRESRESPONSE']._serialized_end=461 + _globals['_DRIVER']._serialized_start=464 + _globals['_DRIVER']._serialized_end=919 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/driver_pb2.pyi index d025e00474eb..77ceb496d70c 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/driver_pb2.pyi @@ -3,10 +3,8 @@ isort:skip_file """ import builtins -import flwr.proto.fab_pb2 import flwr.proto.node_pb2 import flwr.proto.task_pb2 -import flwr.proto.transport_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message @@ -15,56 +13,6 @@ import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor -class 
CreateRunRequest(google.protobuf.message.Message): - """CreateRun""" - DESCRIPTOR: google.protobuf.descriptor.Descriptor - class OverrideConfigEntry(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - KEY_FIELD_NUMBER: builtins.int - VALUE_FIELD_NUMBER: builtins.int - key: typing.Text - @property - def value(self) -> flwr.proto.transport_pb2.Scalar: ... - def __init__(self, - *, - key: typing.Text = ..., - value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - - FAB_ID_FIELD_NUMBER: builtins.int - FAB_VERSION_FIELD_NUMBER: builtins.int - OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int - FAB_FIELD_NUMBER: builtins.int - fab_id: typing.Text - fab_version: typing.Text - @property - def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... - @property - def fab(self) -> flwr.proto.fab_pb2.Fab: ... - def __init__(self, - *, - fab_id: typing.Text = ..., - fab_version: typing.Text = ..., - override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., - fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config"]) -> None: ... -global___CreateRunRequest = CreateRunRequest - -class CreateRunResponse(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - RUN_ID_FIELD_NUMBER: builtins.int - run_id: builtins.int - def __init__(self, - *, - run_id: builtins.int = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... 
-global___CreateRunResponse = CreateRunResponse - class GetNodesRequest(google.protobuf.message.Message): """GetNodes messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor diff --git a/src/py/flwr/proto/driver_pb2_grpc.py b/src/py/flwr/proto/driver_pb2_grpc.py index 6745bc7af62a..91e9fd8b9bdd 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.py +++ b/src/py/flwr/proto/driver_pb2_grpc.py @@ -18,8 +18,8 @@ def __init__(self, channel): """ self.CreateRun = channel.unary_unary( '/flwr.proto.Driver/CreateRun', - request_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, + request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, ) self.GetNodes = channel.unary_unary( '/flwr.proto.Driver/GetNodes', @@ -98,8 +98,8 @@ def add_DriverServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateRun': grpc.unary_unary_rpc_method_handler( servicer.CreateRun, - request_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.FromString, - response_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.SerializeToString, + request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, ), 'GetNodes': grpc.unary_unary_rpc_method_handler( servicer.GetNodes, @@ -148,8 +148,8 @@ def CreateRun(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/CreateRun', - flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, - flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, + flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/driver_pb2_grpc.pyi b/src/py/flwr/proto/driver_pb2_grpc.pyi index 7f9fd0acbd82..8f665301073d 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.pyi +++ b/src/py/flwr/proto/driver_pb2_grpc.pyi @@ -11,8 +11,8 @@ import grpc class DriverStub: def __init__(self, channel: grpc.Channel) -> None: ... 
CreateRun: grpc.UnaryUnaryMultiCallable[ - flwr.proto.driver_pb2.CreateRunRequest, - flwr.proto.driver_pb2.CreateRunResponse] + flwr.proto.run_pb2.CreateRunRequest, + flwr.proto.run_pb2.CreateRunResponse] """Request run_id""" GetNodes: grpc.UnaryUnaryMultiCallable[ @@ -44,9 +44,9 @@ class DriverStub: class DriverServicer(metaclass=abc.ABCMeta): @abc.abstractmethod def CreateRun(self, - request: flwr.proto.driver_pb2.CreateRunRequest, + request: flwr.proto.run_pb2.CreateRunRequest, context: grpc.ServicerContext, - ) -> flwr.proto.driver_pb2.CreateRunResponse: + ) -> flwr.proto.run_pb2.CreateRunResponse: """Request run_id""" pass diff --git a/src/py/flwr/proto/exec_pb2.py b/src/py/flwr/proto/exec_pb2.py index 3fe109067296..574f39eaa18d 100644 --- a/src/py/flwr/proto/exec_pb2.py +++ b/src/py/flwr/proto/exec_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/exec.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xdf\x02\n\x0fStartRunRequest\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fab\x12H\n\x0foverride_config\x18\x02 \x03(\x0b\x32/.flwr.proto.StartRunRequest.OverrideConfigEntry\x12L\n\x11\x66\x65\x64\x65ration_config\x18\x03 \x03(\x0b\x32\x31.flwr.proto.StartRunRequest.FederationConfigEntry\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1aK\n\x15\x46\x65\x64\x65rationConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\"\n\x10StartRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"#\n\x11StreamLogsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"(\n\x12StreamLogsResponse\x12\x12\n\nlog_output\x18\x01 \x01(\t2\xa0\x01\n\x04\x45xec\x12G\n\x08StartRun\x12\x1b.flwr.proto.StartRunRequest\x1a\x1c.flwr.proto.StartRunResponse\"\x00\x12O\n\nStreamLogs\x12\x1d.flwr.proto.StreamLogsRequest\x1a\x1e.flwr.proto.StreamLogsResponse\"\x00\x30\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/exec.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xdf\x02\n\x0fStartRunRequest\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fab\x12H\n\x0foverride_config\x18\x02 \x03(\x0b\x32/.flwr.proto.StartRunRequest.OverrideConfigEntry\x12L\n\x11\x66\x65\x64\x65ration_config\x18\x03 \x03(\x0b\x32\x31.flwr.proto.StartRunRequest.FederationConfigEntry\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1aK\n\x15\x46\x65\x64\x65rationConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\"\n\x10StartRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"#\n\x11StreamLogsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"(\n\x12StreamLogsResponse\x12\x12\n\nlog_output\x18\x01 \x01(\t2\xa0\x01\n\x04\x45xec\x12G\n\x08StartRun\x12\x1b.flwr.proto.StartRunRequest\x1a\x1c.flwr.proto.StartRunResponse\"\x00\x12O\n\nStreamLogs\x12\x1d.flwr.proto.StreamLogsRequest\x1a\x1e.flwr.proto.StreamLogsResponse\"\x00\x30\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/message_pb2.py b/src/py/flwr/proto/message_pb2.py index 7e2555972a8a..d2201cb07b56 100644 --- 
a/src/py/flwr/proto/message_pb2.py +++ b/src/py/flwr/proto/message_pb2.py @@ -17,7 +17,7 @@ from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/message.proto\x12\nflwr.proto\x1a\x16\x66lwr/proto/error.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"{\n\x07Message\x12&\n\x08metadata\x18\x01 \x01(\x0b\x32\x14.flwr.proto.Metadata\x12&\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\x03 \x01(\x0b\x32\x11.flwr.proto.Error\"\xbf\x02\n\x07\x43ontext\x12\x0f\n\x07node_id\x18\x01 \x01(\x12\x12\x38\n\x0bnode_config\x18\x02 \x03(\x0b\x32#.flwr.proto.Context.NodeConfigEntry\x12$\n\x05state\x18\x03 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12\x36\n\nrun_config\x18\x04 \x03(\x0b\x32\".flwr.proto.Context.RunConfigEntry\x1a\x45\n\x0fNodeConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x44\n\x0eRunConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\xbb\x01\n\x08Metadata\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x13\n\x0bsrc_node_id\x18\x03 \x01(\x12\x12\x13\n\x0b\x64st_node_id\x18\x04 \x01(\x12\x12\x18\n\x10reply_to_message\x18\x05 \x01(\t\x12\x10\n\x08group_id\x18\x06 \x01(\t\x12\x0b\n\x03ttl\x18\x07 \x01(\x01\x12\x14\n\x0cmessage_type\x18\x08 \x01(\t\x12\x12\n\ncreated_at\x18\t \x01(\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/message.proto\x12\nflwr.proto\x1a\x16\x66lwr/proto/error.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"{\n\x07Message\x12&\n\x08metadata\x18\x01 \x01(\x0b\x32\x14.flwr.proto.Metadata\x12&\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\x03 \x01(\x0b\x32\x11.flwr.proto.Error\"\xbf\x02\n\x07\x43ontext\x12\x0f\n\x07node_id\x18\x01 \x01(\x04\x12\x38\n\x0bnode_config\x18\x02 \x03(\x0b\x32#.flwr.proto.Context.NodeConfigEntry\x12$\n\x05state\x18\x03 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12\x36\n\nrun_config\x18\x04 \x03(\x0b\x32\".flwr.proto.Context.RunConfigEntry\x1a\x45\n\x0fNodeConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x44\n\x0eRunConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\xbb\x01\n\x08Metadata\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x13\n\x0bsrc_node_id\x18\x03 \x01(\x04\x12\x13\n\x0b\x64st_node_id\x18\x04 \x01(\x04\x12\x18\n\x10reply_to_message\x18\x05 \x01(\t\x12\x10\n\x08group_id\x18\x06 \x01(\t\x12\x0b\n\x03ttl\x18\x07 \x01(\x01\x12\x14\n\x0cmessage_type\x18\x08 \x01(\t\x12\x12\n\ncreated_at\x18\t \x01(\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/node_pb2.py b/src/py/flwr/proto/node_pb2.py index b300f2c562c2..f94691db6c3f 100644 --- a/src/py/flwr/proto/node_pb2.py +++ b/src/py/flwr/proto/node_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/node.proto\x12\nflwr.proto\"*\n\x04Node\x12\x0f\n\x07node_id\x18\x01 \x01(\x12\x12\x11\n\tanonymous\x18\x02 \x01(\x08\x62\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/node.proto\x12\nflwr.proto\"*\n\x04Node\x12\x0f\n\x07node_id\x18\x01 \x01(\x04\x12\x11\n\tanonymous\x18\x02 \x01(\x08\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/run_pb2.py b/src/py/flwr/proto/run_pb2.py index 4892091a6a46..d59cc26fbb48 100644 --- a/src/py/flwr/proto/run_pb2.py +++ b/src/py/flwr/proto/run_pb2.py @@ -12,10 +12,11 @@ _sym_db = _symbol_database.Default() +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\x1f\n\rGetRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Runb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"@\n\tRunStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x12\n\nsub_status\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 \x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"\x1f\n\rGetRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"S\n\x16UpdateRunStatusRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12)\n\nrun_status\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus\"\x19\n\x17UpdateRunStatusResponse\"&\n\x13GetRunStatusRequest\x12\x0f\n\x07run_ids\x18\x01 \x03(\x04\"\xb1\x01\n\x14GetRunStatusResponse\x12L\n\x0frun_status_dict\x18\x01 \x03(\x0b\x32\x33.flwr.proto.GetRunStatusResponse.RunStatusDictEntry\x1aK\n\x12RunStatusDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus:\x02\x38\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -24,12 +25,34 @@ DESCRIPTOR._options = None _globals['_RUN_OVERRIDECONFIGENTRY']._options = None _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' - 
_globals['_RUN']._serialized_start=65 - _globals['_RUN']._serialized_end=278 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=205 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=278 - _globals['_GETRUNREQUEST']._serialized_start=280 - _globals['_GETRUNREQUEST']._serialized_end=311 - _globals['_GETRUNRESPONSE']._serialized_start=313 - _globals['_GETRUNRESPONSE']._serialized_end=359 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._options = None + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._options = None + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_options = b'8\001' + _globals['_RUN']._serialized_start=87 + _globals['_RUN']._serialized_end=300 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=227 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=300 + _globals['_RUNSTATUS']._serialized_start=302 + _globals['_RUNSTATUS']._serialized_end=366 + _globals['_CREATERUNREQUEST']._serialized_start=369 + _globals['_CREATERUNREQUEST']._serialized_end=604 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=227 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=300 + _globals['_CREATERUNRESPONSE']._serialized_start=606 + _globals['_CREATERUNRESPONSE']._serialized_end=641 + _globals['_GETRUNREQUEST']._serialized_start=643 + _globals['_GETRUNREQUEST']._serialized_end=674 + _globals['_GETRUNRESPONSE']._serialized_start=676 + _globals['_GETRUNRESPONSE']._serialized_end=722 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_start=724 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_end=807 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_start=809 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_end=834 + _globals['_GETRUNSTATUSREQUEST']._serialized_start=836 + _globals['_GETRUNSTATUSREQUEST']._serialized_end=874 + _globals['_GETRUNSTATUSRESPONSE']._serialized_start=877 + _globals['_GETRUNSTATUSRESPONSE']._serialized_end=1054 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_start=979 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_end=1054 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/run_pb2.pyi b/src/py/flwr/proto/run_pb2.pyi index e65feee9c518..cec90c4d2d4c 100644 --- a/src/py/flwr/proto/run_pb2.pyi +++ b/src/py/flwr/proto/run_pb2.pyi @@ -3,6 +3,7 @@ isort:skip_file """ import builtins +import flwr.proto.fab_pb2 import flwr.proto.transport_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers @@ -51,7 +52,81 @@ class Run(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["fab_hash",b"fab_hash","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config","run_id",b"run_id"]) -> None: ... global___Run = Run +class RunStatus(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + STATUS_FIELD_NUMBER: builtins.int + SUB_STATUS_FIELD_NUMBER: builtins.int + DETAILS_FIELD_NUMBER: builtins.int + status: typing.Text + """"starting", "running", "finished" """ + + sub_status: typing.Text + """"completed", "failed", "stopped" or "" (non-finished)""" + + details: typing.Text + """failure details""" + + def __init__(self, + *, + status: typing.Text = ..., + sub_status: typing.Text = ..., + details: typing.Text = ..., + ) -> None: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["details",b"details","status",b"status","sub_status",b"sub_status"]) -> None: ... +global___RunStatus = RunStatus + +class CreateRunRequest(google.protobuf.message.Message): + """CreateRun""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class OverrideConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int + FAB_FIELD_NUMBER: builtins.int + fab_id: typing.Text + fab_version: typing.Text + @property + def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + @property + def fab(self) -> flwr.proto.fab_pb2.Fab: ... + def __init__(self, + *, + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., + override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config"]) -> None: ... +global___CreateRunRequest = CreateRunRequest + +class CreateRunResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + run_id: builtins.int + def __init__(self, + *, + run_id: builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___CreateRunResponse = CreateRunResponse + class GetRunRequest(google.protobuf.message.Message): + """GetRun""" DESCRIPTOR: google.protobuf.descriptor.Descriptor RUN_ID_FIELD_NUMBER: builtins.int run_id: builtins.int @@ -74,3 +149,66 @@ class GetRunResponse(google.protobuf.message.Message): def HasField(self, field_name: typing_extensions.Literal["run",b"run"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["run",b"run"]) -> None: ... global___GetRunResponse = GetRunResponse + +class UpdateRunStatusRequest(google.protobuf.message.Message): + """UpdateRunStatus""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + RUN_STATUS_FIELD_NUMBER: builtins.int + run_id: builtins.int + @property + def run_status(self) -> global___RunStatus: ... + def __init__(self, + *, + run_id: builtins.int = ..., + run_status: typing.Optional[global___RunStatus] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["run_status",b"run_status"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id","run_status",b"run_status"]) -> None: ... 
+global___UpdateRunStatusRequest = UpdateRunStatusRequest + +class UpdateRunStatusResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___UpdateRunStatusResponse = UpdateRunStatusResponse + +class GetRunStatusRequest(google.protobuf.message.Message): + """GetRunStatus""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_IDS_FIELD_NUMBER: builtins.int + @property + def run_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + run_ids: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_ids",b"run_ids"]) -> None: ... +global___GetRunStatusRequest = GetRunStatusRequest + +class GetRunStatusResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class RunStatusDictEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int + @property + def value(self) -> global___RunStatus: ... + def __init__(self, + *, + key: builtins.int = ..., + value: typing.Optional[global___RunStatus] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + RUN_STATUS_DICT_FIELD_NUMBER: builtins.int + @property + def run_status_dict(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___RunStatus]: ... + def __init__(self, + *, + run_status_dict: typing.Optional[typing.Mapping[builtins.int, global___RunStatus]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_status_dict",b"run_status_dict"]) -> None: ... 
+global___GetRunStatusResponse = GetRunStatusResponse diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index 3e044f9ec846..75b022dc65ea 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -17,7 +17,7 @@ from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x04\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x04\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/server/__init__.py b/src/py/flwr/server/__init__.py index 896b46298327..1dde95b6b047 100644 --- a/src/py/flwr/server/__init__.py +++ b/src/py/flwr/server/__init__.py @@ -17,14 +17,12 @@ from . import strategy from . 
import workflow as workflow -from .app import run_superlink as run_superlink from .app import start_server as start_server from .client_manager import ClientManager as ClientManager from .client_manager import SimpleClientManager as SimpleClientManager from .compat import LegacyContext as LegacyContext from .driver import Driver as Driver from .history import History as History -from .run_serverapp import run_server_app as run_server_app from .server import Server as Server from .server_app import ServerApp as ServerApp from .server_config import ServerConfig as ServerConfig @@ -40,8 +38,6 @@ "ServerAppComponents", "ServerConfig", "SimpleClientManager", - "run_server_app", - "run_superlink", "start_server", "strategy", "workflow", diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index 32b490903554..d156edaa3c99 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -19,10 +19,11 @@ import importlib.util import sys import threading +from collections.abc import Sequence from logging import INFO, WARN from os.path import isfile from pathlib import Path -from typing import Optional, Sequence, Set, Tuple +from typing import Optional import grpc from cryptography.exceptions import UnsupportedAlgorithm @@ -36,6 +37,10 @@ from flwr.common.address import parse_address from flwr.common.config import get_flwr_dir from flwr.common.constant import ( + DRIVER_API_DEFAULT_ADDRESS, + FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS, + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + FLEET_API_REST_DEFAULT_ADDRESS, MISSING_EXTRA_REST, TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_RERE, @@ -68,24 +73,19 @@ from .superlink.fleet.grpc_rere.server_interceptor import AuthenticateServerInterceptor from .superlink.state import StateFactory -ADDRESS_DRIVER_API = "0.0.0.0:9091" -ADDRESS_FLEET_API_GRPC_RERE = "0.0.0.0:9092" -ADDRESS_FLEET_API_GRPC_BIDI = "[::]:8080" # IPv6 to keep start_server compatible -ADDRESS_FLEET_API_REST = "0.0.0.0:9093" - DATABASE = ":flwr-in-memory-state:" BASE_DIR = get_flwr_dir() / "superlink" / "ffs" def start_server( # pylint: disable=too-many-arguments,too-many-locals *, - server_address: str = ADDRESS_FLEET_API_GRPC_BIDI, + server_address: str = FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, ) -> History: """Start a Flower server using the gRPC transport layer. 
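
`start_server` keeps its signature, but its default address now comes from `FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS` in `flwr.common.constant` and the `certificates` parameter is typed with the built-in `tuple[bytes, bytes, bytes]`. A minimal sketch of a legacy server start; the round count and strategy are chosen arbitrarily:

# Sketch only: legacy gRPC-bidi server start with illustrative values.
from flwr.server import ServerConfig, start_server
from flwr.server.strategy import FedAvg

history = start_server(
    config=ServerConfig(num_rounds=3),
    strategy=FedAvg(),
)
print(history.losses_distributed)
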
@@ -232,9 +232,9 @@ def run_superlink() -> None: TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_GRPC_ADAPTER, ]: - args.fleet_api_address = ADDRESS_FLEET_API_GRPC_RERE + args.fleet_api_address = FLEET_API_GRPC_RERE_DEFAULT_ADDRESS elif args.fleet_api_type == TRANSPORT_TYPE_REST: - args.fleet_api_address = ADDRESS_FLEET_API_REST + args.fleet_api_address = FLEET_API_REST_DEFAULT_ADDRESS fleet_address, host, port = _format_address(args.fleet_api_address) @@ -334,7 +334,7 @@ def run_superlink() -> None: driver_server.wait_for_termination(timeout=1) -def _format_address(address: str) -> Tuple[str, str, int]: +def _format_address(address: str) -> tuple[str, str, int]: parsed_address = parse_address(address) if not parsed_address: sys.exit( @@ -346,8 +346,8 @@ def _format_address(address: str) -> Tuple[str, str, int]: def _try_setup_node_authentication( args: argparse.Namespace, - certificates: Optional[Tuple[bytes, bytes, bytes]], -) -> Optional[Tuple[Set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: + certificates: Optional[tuple[bytes, bytes, bytes]], +) -> Optional[tuple[set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: if ( not args.auth_list_public_keys and not args.auth_superlink_private_key @@ -382,7 +382,7 @@ def _try_setup_node_authentication( "to '--auth-list-public-keys'." ) - node_public_keys: Set[bytes] = set() + node_public_keys: set[bytes] = set() try: ssh_private_key = load_ssh_private_key( @@ -435,7 +435,7 @@ def _try_setup_node_authentication( def _try_obtain_certificates( args: argparse.Namespace, -) -> Optional[Tuple[bytes, bytes, bytes]]: +) -> Optional[tuple[bytes, bytes, bytes]]: # Obtain certificates if args.insecure: log(WARN, "Option `--insecure` was set. Starting insecure HTTP server.") @@ -491,7 +491,7 @@ def _run_fleet_api_grpc_rere( address: str, state_factory: StateFactory, ffs_factory: FfsFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Run Fleet API (gRPC, request-response).""" @@ -519,7 +519,7 @@ def _run_fleet_api_grpc_adapter( address: str, state_factory: StateFactory, ffs_factory: FfsFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], ) -> grpc.Server: """Run Fleet API (GrpcAdapter).""" # Create Fleet API gRPC server @@ -653,7 +653,7 @@ def _add_args_driver_api(parser: argparse.ArgumentParser) -> None: parser.add_argument( "--driver-api-address", help="Driver API (gRPC) server address (IPv4, IPv6, or a domain name).", - default=ADDRESS_DRIVER_API, + default=DRIVER_API_DEFAULT_ADDRESS, ) diff --git a/src/py/flwr/server/client_manager.py b/src/py/flwr/server/client_manager.py index 7956e282bd2c..175bd4a786ea 100644 --- a/src/py/flwr/server/client_manager.py +++ b/src/py/flwr/server/client_manager.py @@ -19,7 +19,7 @@ import threading from abc import ABC, abstractmethod from logging import INFO -from typing import Dict, List, Optional +from typing import Optional from flwr.common.logger import log @@ -67,7 +67,7 @@ def unregister(self, client: ClientProxy) -> None: """ @abstractmethod - def all(self) -> Dict[str, ClientProxy]: + def all(self) -> dict[str, ClientProxy]: """Return all available clients.""" @abstractmethod @@ -80,7 +80,7 @@ def sample( num_clients: int, min_num_clients: Optional[int] = None, criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: + ) -> list[ClientProxy]: """Sample a number of 
Flower ClientProxy instances.""" @@ -88,7 +88,7 @@ class SimpleClientManager(ClientManager): """Provides a pool of available clients.""" def __init__(self) -> None: - self.clients: Dict[str, ClientProxy] = {} + self.clients: dict[str, ClientProxy] = {} self._cv = threading.Condition() def __len__(self) -> int: @@ -170,7 +170,7 @@ def unregister(self, client: ClientProxy) -> None: with self._cv: self._cv.notify_all() - def all(self) -> Dict[str, ClientProxy]: + def all(self) -> dict[str, ClientProxy]: """Return all available clients.""" return self.clients @@ -179,7 +179,7 @@ def sample( num_clients: int, min_num_clients: Optional[int] = None, criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: + ) -> list[ClientProxy]: """Sample a number of Flower ClientProxy instances.""" # Block until at least num_clients are connected. if min_num_clients is None: diff --git a/src/py/flwr/server/compat/app.py b/src/py/flwr/server/compat/app.py index e978359fa828..1d3e5024ba90 100644 --- a/src/py/flwr/server/compat/app.py +++ b/src/py/flwr/server/compat/app.py @@ -18,7 +18,6 @@ from logging import INFO from typing import Optional -from flwr.common import EventType, event from flwr.common.logger import log from flwr.server.client_manager import ClientManager from flwr.server.history import History @@ -65,8 +64,6 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals hist : flwr.server.history.History Object containing training and evaluation metrics. """ - event(EventType.START_DRIVER_ENTER) - # Initialize the Driver API server and config initialized_server, initialized_config = init_defaults( server=server, @@ -96,6 +93,4 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals f_stop.set() thread.join() - event(EventType.START_SERVER_LEAVE) - return hist diff --git a/src/py/flwr/server/compat/app_utils.py b/src/py/flwr/server/compat/app_utils.py index baff27307b88..8d2479f47d40 100644 --- a/src/py/flwr/server/compat/app_utils.py +++ b/src/py/flwr/server/compat/app_utils.py @@ -16,7 +16,6 @@ import threading -from typing import Dict, Tuple from ..client_manager import ClientManager from ..compat.driver_client_proxy import DriverClientProxy @@ -26,7 +25,7 @@ def start_update_client_manager_thread( driver: Driver, client_manager: ClientManager, -) -> Tuple[threading.Thread, threading.Event]: +) -> tuple[threading.Thread, threading.Event]: """Periodically update the nodes list in the client manager in a thread. 
This function starts a thread that periodically uses the associated driver to @@ -73,7 +72,7 @@ def _update_client_manager( ) -> None: """Update the nodes list in the client manager.""" # Loop until the driver is disconnected - registered_nodes: Dict[int, DriverClientProxy] = {} + registered_nodes: dict[int, DriverClientProxy] = {} while not f_stop.is_set(): all_node_ids = set(driver.get_node_ids()) dead_nodes = set(registered_nodes).difference(all_node_ids) diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py index 31b917fa869b..a5b454c79f90 100644 --- a/src/py/flwr/server/compat/driver_client_proxy_test.py +++ b/src/py/flwr/server/compat/driver_client_proxy_test.py @@ -17,7 +17,8 @@ import unittest import unittest.mock -from typing import Any, Callable, Iterable, Optional, Union, cast +from collections.abc import Iterable +from typing import Any, Callable, Optional, Union, cast from unittest.mock import Mock import numpy as np diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index 4f888323e586..e8429e865db6 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -16,7 +16,8 @@ from abc import ABC, abstractmethod -from typing import Iterable, List, Optional +from collections.abc import Iterable +from typing import Optional from flwr.common import Message, RecordSet from flwr.common.typing import Run @@ -70,7 +71,7 @@ def create_message( # pylint: disable=too-many-arguments """ @abstractmethod - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" @abstractmethod diff --git a/src/py/flwr/server/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py index 80ce9623ab3f..421dfd30ecb2 100644 --- a/src/py/flwr/server/driver/grpc_driver.py +++ b/src/py/flwr/server/driver/grpc_driver.py @@ -16,12 +16,14 @@ import time import warnings +from collections.abc import Iterable from logging import DEBUG, WARNING -from typing import Iterable, List, Optional, cast +from typing import Optional, cast import grpc -from flwr.common import DEFAULT_TTL, EventType, Message, Metadata, RecordSet, event +from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS from flwr.common.grpc import create_channel from flwr.common.logger import log from flwr.common.serde import ( @@ -45,8 +47,6 @@ from .driver import Driver -DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" - ERROR_MESSAGE_DRIVER_NOT_CONNECTED = """ [Driver] Error: Not connected. @@ -73,7 +73,7 @@ class GrpcDriver(Driver): def __init__( # pylint: disable=too-many-arguments self, run_id: int, - driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, + driver_service_address: str = DRIVER_API_DEFAULT_ADDRESS, root_certificates: Optional[bytes] = None, ) -> None: self._run_id = run_id @@ -94,7 +94,6 @@ def _connect(self) -> None: This will not call GetRun. 
""" - event(EventType.DRIVER_CONNECT) if self._is_connected: log(WARNING, "Already connected") return @@ -108,7 +107,6 @@ def _connect(self) -> None: def _disconnect(self) -> None: """Disconnect from the Driver API.""" - event(EventType.DRIVER_DISCONNECT) if not self._is_connected: log(DEBUG, "Already disconnected") return @@ -195,7 +193,7 @@ def create_message( # pylint: disable=too-many-arguments ) return Message(metadata=metadata, content=content) - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" self._init_run() # Call GrpcDriverStub method @@ -212,7 +210,7 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: """ self._init_run() # Construct TaskIns - task_ins_list: List[TaskIns] = [] + task_ins_list: list[TaskIns] = [] for msg in messages: # Check message self._check_message(msg) @@ -258,7 +256,7 @@ def send_and_receive( # Pull messages end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] + ret: list[Message] = [] while timeout is None or time.time() < end_time: res_msgs = self.pull_messages(msg_ids) ret.extend(res_msgs) diff --git a/src/py/flwr/server/driver/inmemory_driver.py b/src/py/flwr/server/driver/inmemory_driver.py index 53406796750f..3a8a4b1bc73d 100644 --- a/src/py/flwr/server/driver/inmemory_driver.py +++ b/src/py/flwr/server/driver/inmemory_driver.py @@ -17,7 +17,8 @@ import time import warnings -from typing import Iterable, List, Optional, cast +from collections.abc import Iterable +from typing import Optional, cast from uuid import UUID from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet @@ -112,7 +113,7 @@ def create_message( # pylint: disable=too-many-arguments ) return Message(metadata=metadata, content=content) - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" self._init_run() return list(self.state.get_nodes(cast(Run, self._run).run_id)) @@ -123,7 +124,7 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: This method takes an iterable of messages and sends each message to the node specified in `dst_node_id`. 
""" - task_ids: List[str] = [] + task_ids: list[str] = [] for msg in messages: # Check message self._check_message(msg) @@ -169,7 +170,7 @@ def send_and_receive( # Pull messages end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] + ret: list[Message] = [] while timeout is None or time.time() < end_time: res_msgs = self.pull_messages(msg_ids) ret.extend(res_msgs) diff --git a/src/py/flwr/server/driver/inmemory_driver_test.py b/src/py/flwr/server/driver/inmemory_driver_test.py index ddfdb249c1b4..9e5aaeaa9ca7 100644 --- a/src/py/flwr/server/driver/inmemory_driver_test.py +++ b/src/py/flwr/server/driver/inmemory_driver_test.py @@ -17,7 +17,7 @@ import time import unittest -from typing import Iterable, List, Tuple +from collections.abc import Iterable from unittest.mock import MagicMock, patch from uuid import uuid4 @@ -38,7 +38,7 @@ from .inmemory_driver import InMemoryDriver -def push_messages(driver: InMemoryDriver, num_nodes: int) -> Tuple[Iterable[str], int]: +def push_messages(driver: InMemoryDriver, num_nodes: int) -> tuple[Iterable[str], int]: """Help push messages to state.""" for _ in range(num_nodes): driver.state.create_node(ping_interval=PING_MAX_INTERVAL) @@ -55,7 +55,7 @@ def push_messages(driver: InMemoryDriver, num_nodes: int) -> Tuple[Iterable[str] def get_replies( driver: InMemoryDriver, msg_ids: Iterable[str], node_id: int -) -> List[str]: +) -> list[str]: """Help create message replies and pull taskres from state.""" taskins = driver.state.get_task_ins(node_id, limit=len(list(msg_ids))) for taskin in taskins: diff --git a/src/py/flwr/server/history.py b/src/py/flwr/server/history.py index 291974a4323c..50daf2e04de6 100644 --- a/src/py/flwr/server/history.py +++ b/src/py/flwr/server/history.py @@ -17,7 +17,6 @@ import pprint from functools import reduce -from typing import Dict, List, Tuple from flwr.common.typing import Scalar @@ -26,11 +25,11 @@ class History: """History class for training and/or evaluation metrics collection.""" def __init__(self) -> None: - self.losses_distributed: List[Tuple[int, float]] = [] - self.losses_centralized: List[Tuple[int, float]] = [] - self.metrics_distributed_fit: Dict[str, List[Tuple[int, Scalar]]] = {} - self.metrics_distributed: Dict[str, List[Tuple[int, Scalar]]] = {} - self.metrics_centralized: Dict[str, List[Tuple[int, Scalar]]] = {} + self.losses_distributed: list[tuple[int, float]] = [] + self.losses_centralized: list[tuple[int, float]] = [] + self.metrics_distributed_fit: dict[str, list[tuple[int, Scalar]]] = {} + self.metrics_distributed: dict[str, list[tuple[int, Scalar]]] = {} + self.metrics_centralized: dict[str, list[tuple[int, Scalar]]] = {} def add_loss_distributed(self, server_round: int, loss: float) -> None: """Add one loss entry (from distributed evaluation).""" @@ -41,7 +40,7 @@ def add_loss_centralized(self, server_round: int, loss: float) -> None: self.losses_centralized.append((server_round, loss)) def add_metrics_distributed_fit( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from distributed fit).""" for key in metrics: @@ -52,7 +51,7 @@ def add_metrics_distributed_fit( self.metrics_distributed_fit[key].append((server_round, metrics[key])) def add_metrics_distributed( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from distributed evaluation).""" for key in metrics: @@ -63,7 +62,7 @@ 
def add_metrics_distributed( self.metrics_distributed[key].append((server_round, metrics[key])) def add_metrics_centralized( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from centralized evaluation).""" for key in metrics: diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py index d9c363245a2e..a9ec05fe90e0 100644 --- a/src/py/flwr/server/run_serverapp.py +++ b/src/py/flwr/server/run_serverapp.py @@ -31,21 +31,20 @@ get_project_config, get_project_dir, ) +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS from flwr.common.logger import log, update_console_handler, warn_deprecated_feature from flwr.common.object_ref import load_app from flwr.common.typing import UserConfig -from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 CreateRunRequest, CreateRunResponse, ) -from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from .driver import Driver from .driver.grpc_driver import GrpcDriver from .server_app import LoadServerAppError, ServerApp -ADDRESS_DRIVER_API = "0.0.0.0:9091" - def run( driver: Driver, @@ -112,11 +111,11 @@ def run_server_app() -> None: "app by executing `flwr new` and following the prompt." ) - if args.server != ADDRESS_DRIVER_API: + if args.server != DRIVER_API_DEFAULT_ADDRESS: warn = "Passing flag --server is deprecated. Use --superlink instead." warn_deprecated_feature(warn) - if args.superlink != ADDRESS_DRIVER_API: + if args.superlink != DRIVER_API_DEFAULT_ADDRESS: # if `--superlink` also passed, then # warn user that this argument overrides what was passed with `--server` log( @@ -275,12 +274,12 @@ def _parse_args_run_server_app() -> argparse.ArgumentParser: ) parser.add_argument( "--server", - default=ADDRESS_DRIVER_API, + default=DRIVER_API_DEFAULT_ADDRESS, help="Server address", ) parser.add_argument( "--superlink", - default=ADDRESS_DRIVER_API, + default=DRIVER_API_DEFAULT_ADDRESS, help="SuperLink Driver API (gRPC-rere) address (IPv4, IPv6, or a domain name)", ) parser.add_argument( diff --git a/src/py/flwr/server/server.py b/src/py/flwr/server/server.py index 5e2a0c6b2719..bdaa11ba20a2 100644 --- a/src/py/flwr/server/server.py +++ b/src/py/flwr/server/server.py @@ -19,7 +19,7 @@ import io import timeit from logging import INFO, WARN -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( Code, @@ -41,17 +41,17 @@ from .server_config import ServerConfig -FitResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, FitRes]], - List[Union[Tuple[ClientProxy, FitRes], BaseException]], +FitResultsAndFailures = tuple[ + list[tuple[ClientProxy, FitRes]], + list[Union[tuple[ClientProxy, FitRes], BaseException]], ] -EvaluateResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, EvaluateRes]], - List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], +EvaluateResultsAndFailures = tuple[ + list[tuple[ClientProxy, EvaluateRes]], + list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], ] -ReconnectResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, DisconnectRes]], - List[Union[Tuple[ClientProxy, DisconnectRes], BaseException]], +ReconnectResultsAndFailures = tuple[ + list[tuple[ClientProxy, DisconnectRes]], + list[Union[tuple[ClientProxy, DisconnectRes], BaseException]], ] @@ -84,7 +84,7 @@ 
def client_manager(self) -> ClientManager: return self._client_manager # pylint: disable=too-many-locals - def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float]: + def fit(self, num_rounds: int, timeout: Optional[float]) -> tuple[History, float]: """Run federated averaging for a number of rounds.""" history = History() @@ -163,7 +163,7 @@ def evaluate_round( server_round: int, timeout: Optional[float], ) -> Optional[ - Tuple[Optional[float], Dict[str, Scalar], EvaluateResultsAndFailures] + tuple[Optional[float], dict[str, Scalar], EvaluateResultsAndFailures] ]: """Validate current global model on a number of clients.""" # Get clients and their respective instructions from strategy @@ -197,9 +197,9 @@ def evaluate_round( ) # Aggregate the evaluation results - aggregated_result: Tuple[ + aggregated_result: tuple[ Optional[float], - Dict[str, Scalar], + dict[str, Scalar], ] = self.strategy.aggregate_evaluate(server_round, results, failures) loss_aggregated, metrics_aggregated = aggregated_result @@ -210,7 +210,7 @@ def fit_round( server_round: int, timeout: Optional[float], ) -> Optional[ - Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] + tuple[Optional[Parameters], dict[str, Scalar], FitResultsAndFailures] ]: """Perform a single round of federated averaging.""" # Get clients and their respective instructions from strategy @@ -245,9 +245,9 @@ def fit_round( ) # Aggregate training results - aggregated_result: Tuple[ + aggregated_result: tuple[ Optional[Parameters], - Dict[str, Scalar], + dict[str, Scalar], ] = self.strategy.aggregate_fit(server_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result @@ -296,7 +296,7 @@ def _get_initial_parameters( def reconnect_clients( - client_instructions: List[Tuple[ClientProxy, ReconnectIns]], + client_instructions: list[tuple[ClientProxy, ReconnectIns]], max_workers: Optional[int], timeout: Optional[float], ) -> ReconnectResultsAndFailures: @@ -312,8 +312,8 @@ def reconnect_clients( ) # Gather results - results: List[Tuple[ClientProxy, DisconnectRes]] = [] - failures: List[Union[Tuple[ClientProxy, DisconnectRes], BaseException]] = [] + results: list[tuple[ClientProxy, DisconnectRes]] = [] + failures: list[Union[tuple[ClientProxy, DisconnectRes], BaseException]] = [] for future in finished_fs: failure = future.exception() if failure is not None: @@ -328,7 +328,7 @@ def reconnect_client( client: ClientProxy, reconnect: ReconnectIns, timeout: Optional[float], -) -> Tuple[ClientProxy, DisconnectRes]: +) -> tuple[ClientProxy, DisconnectRes]: """Instruct client to disconnect and (optionally) reconnect later.""" disconnect = client.reconnect( reconnect, @@ -339,7 +339,7 @@ def reconnect_client( def fit_clients( - client_instructions: List[Tuple[ClientProxy, FitIns]], + client_instructions: list[tuple[ClientProxy, FitIns]], max_workers: Optional[int], timeout: Optional[float], group_id: int, @@ -356,8 +356,8 @@ def fit_clients( ) # Gather results - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] for future in finished_fs: _handle_finished_future_after_fit( future=future, results=results, failures=failures @@ -367,7 +367,7 @@ def fit_clients( def fit_client( client: ClientProxy, ins: FitIns, timeout: Optional[float], group_id: int -) -> Tuple[ClientProxy, FitRes]: +) -> tuple[ClientProxy, 
FitRes]: """Refine parameters on a single client.""" fit_res = client.fit(ins, timeout=timeout, group_id=group_id) return client, fit_res @@ -375,8 +375,8 @@ def fit_client( def _handle_finished_future_after_fit( future: concurrent.futures.Future, # type: ignore - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], ) -> None: """Convert finished future into either a result or a failure.""" # Check if there was an exception @@ -386,7 +386,7 @@ def _handle_finished_future_after_fit( return # Successfully received a result from a client - result: Tuple[ClientProxy, FitRes] = future.result() + result: tuple[ClientProxy, FitRes] = future.result() _, res = result # Check result status code @@ -399,7 +399,7 @@ def _handle_finished_future_after_fit( def evaluate_clients( - client_instructions: List[Tuple[ClientProxy, EvaluateIns]], + client_instructions: list[tuple[ClientProxy, EvaluateIns]], max_workers: Optional[int], timeout: Optional[float], group_id: int, @@ -416,8 +416,8 @@ def evaluate_clients( ) # Gather results - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] for future in finished_fs: _handle_finished_future_after_evaluate( future=future, results=results, failures=failures @@ -430,7 +430,7 @@ def evaluate_client( ins: EvaluateIns, timeout: Optional[float], group_id: int, -) -> Tuple[ClientProxy, EvaluateRes]: +) -> tuple[ClientProxy, EvaluateRes]: """Evaluate parameters on a single client.""" evaluate_res = client.evaluate(ins, timeout=timeout, group_id=group_id) return client, evaluate_res @@ -438,8 +438,8 @@ def evaluate_client( def _handle_finished_future_after_evaluate( future: concurrent.futures.Future, # type: ignore - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], ) -> None: """Convert finished future into either a result or a failure.""" # Check if there was an exception @@ -449,7 +449,7 @@ def _handle_finished_future_after_evaluate( return # Successfully received a result from a client - result: Tuple[ClientProxy, EvaluateRes] = future.result() + result: tuple[ClientProxy, EvaluateRes] = future.result() _, res = result # Check result status code @@ -466,7 +466,7 @@ def init_defaults( config: Optional[ServerConfig], strategy: Optional[Strategy], client_manager: Optional[ClientManager], -) -> Tuple[Server, ServerConfig]: +) -> tuple[Server, ServerConfig]: """Create server instance if none was given.""" if server is None: if client_manager is None: diff --git a/src/py/flwr/server/server_test.py b/src/py/flwr/server/server_test.py index b80811a6f730..6e8f423fe115 100644 --- a/src/py/flwr/server/server_test.py +++ b/src/py/flwr/server/server_test.py @@ -19,7 +19,7 @@ import csv import tempfile from pathlib import Path -from typing import List, Optional +from typing import Optional import numpy as np from cryptography.hazmat.primitives.asymmetric import ec @@ -143,7 +143,7 @@ def reconnect( def test_fit_clients() -> None: """Test fit_clients.""" # Prepare - clients: List[ClientProxy] = [ + clients: 
list[ClientProxy] = [ FailingClient("0"), SuccessClient("1"), ] @@ -164,7 +164,7 @@ def test_fit_clients() -> None: def test_eval_clients() -> None: """Test eval_clients.""" # Prepare - clients: List[ClientProxy] = [ + clients: list[ClientProxy] = [ FailingClient("0"), SuccessClient("1"), ] diff --git a/src/py/flwr/server/strategy/aggregate.py b/src/py/flwr/server/strategy/aggregate.py index c668b55eebe6..d5ee7340f8ea 100644 --- a/src/py/flwr/server/strategy/aggregate.py +++ b/src/py/flwr/server/strategy/aggregate.py @@ -16,7 +16,7 @@ # mypy: disallow_untyped_calls=False from functools import reduce -from typing import Any, Callable, List, Tuple +from typing import Any, Callable import numpy as np @@ -24,7 +24,7 @@ from flwr.server.client_proxy import ClientProxy -def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: +def aggregate(results: list[tuple[NDArrays, int]]) -> NDArrays: """Compute weighted average.""" # Calculate the total number of examples used during training num_examples_total = sum(num_examples for (_, num_examples) in results) @@ -42,7 +42,7 @@ def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: return weights_prime -def aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: +def aggregate_inplace(results: list[tuple[ClientProxy, FitRes]]) -> NDArrays: """Compute in-place weighted average.""" # Count total examples num_examples_total = sum(fit_res.num_examples for (_, fit_res) in results) @@ -67,7 +67,7 @@ def aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: return params -def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays: +def aggregate_median(results: list[tuple[NDArrays, int]]) -> NDArrays: """Compute median.""" # Create a list of weights and ignore the number of examples weights = [weights for weights, _ in results] @@ -80,7 +80,7 @@ def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays: def aggregate_krum( - results: List[Tuple[NDArrays, int]], num_malicious: int, to_keep: int + results: list[tuple[NDArrays, int]], num_malicious: int, to_keep: int ) -> NDArrays: """Choose one parameter vector according to the Krum function. @@ -119,7 +119,7 @@ def aggregate_krum( # pylint: disable=too-many-locals def aggregate_bulyan( - results: List[Tuple[NDArrays, int]], + results: list[tuple[NDArrays, int]], num_malicious: int, aggregation_rule: Callable, # type: ignore **aggregation_rule_kwargs: Any, @@ -155,7 +155,7 @@ def aggregate_bulyan( "It is needed to ensure that the method reduces the attacker's leeway to " "the one proved in the paper." 
) - selected_models_set: List[Tuple[NDArrays, int]] = [] + selected_models_set: list[tuple[NDArrays, int]] = [] theta = len(results) - 2 * num_malicious beta = theta - 2 * num_malicious @@ -200,7 +200,7 @@ def aggregate_bulyan( return parameters_aggregated -def weighted_loss_avg(results: List[Tuple[int, float]]) -> float: +def weighted_loss_avg(results: list[tuple[int, float]]) -> float: """Aggregate evaluation results obtained from multiple clients.""" num_total_evaluation_examples = sum(num_examples for (num_examples, _) in results) weighted_losses = [num_examples * loss for num_examples, loss in results] @@ -208,7 +208,7 @@ def weighted_loss_avg(results: List[Tuple[int, float]]) -> float: def aggregate_qffl( - parameters: NDArrays, deltas: List[NDArrays], hs_fll: List[NDArrays] + parameters: NDArrays, deltas: list[NDArrays], hs_fll: list[NDArrays] ) -> NDArrays: """Compute weighted average based on Q-FFL paper.""" demominator: float = np.sum(np.asarray(hs_fll)) @@ -225,7 +225,7 @@ def aggregate_qffl( return new_parameters -def _compute_distances(weights: List[NDArrays]) -> NDArray: +def _compute_distances(weights: list[NDArrays]) -> NDArray: """Compute distances between vectors. Input: weights - list of weights vectors @@ -265,7 +265,7 @@ def _trim_mean(array: NDArray, proportiontocut: float) -> NDArray: def aggregate_trimmed_avg( - results: List[Tuple[NDArrays, int]], proportiontocut: float + results: list[tuple[NDArrays, int]], proportiontocut: float ) -> NDArrays: """Compute trimmed average.""" # Create a list of weights and ignore the number of examples @@ -290,7 +290,7 @@ def _check_weights_equality(weights1: NDArrays, weights2: NDArrays) -> bool: def _find_reference_weights( - reference_weights: NDArrays, list_of_weights: List[NDArrays] + reference_weights: NDArrays, list_of_weights: list[NDArrays] ) -> int: """Find the reference weights by looping through the `list_of_weights`. @@ -320,7 +320,7 @@ def _find_reference_weights( def _aggregate_n_closest_weights( - reference_weights: NDArrays, results: List[Tuple[NDArrays, int]], beta_closest: int + reference_weights: NDArrays, results: list[tuple[NDArrays, int]], beta_closest: int ) -> NDArrays: """Calculate element-wise mean of the `N` closest values. 
diff --git a/src/py/flwr/server/strategy/aggregate_test.py b/src/py/flwr/server/strategy/aggregate_test.py index f8b4e3c03b50..9f9dba79ec7c 100644 --- a/src/py/flwr/server/strategy/aggregate_test.py +++ b/src/py/flwr/server/strategy/aggregate_test.py @@ -15,8 +15,6 @@ """Aggregation function tests.""" -from typing import List, Tuple - import numpy as np from .aggregate import ( @@ -49,7 +47,7 @@ def test_aggregate() -> None: def test_weighted_loss_avg_single_value() -> None: """Test weighted loss averaging.""" # Prepare - results: List[Tuple[int, float]] = [(5, 0.5)] + results: list[tuple[int, float]] = [(5, 0.5)] expected = 0.5 # Execute @@ -62,7 +60,7 @@ def test_weighted_loss_avg_single_value() -> None: def test_weighted_loss_avg_multiple_values() -> None: """Test weighted loss averaging.""" # Prepare - results: List[Tuple[int, float]] = [(1, 2.0), (2, 1.0), (1, 2.0)] + results: list[tuple[int, float]] = [(1, 2.0), (2, 1.0), (1, 2.0)] expected = 1.5 # Execute diff --git a/src/py/flwr/server/strategy/bulyan.py b/src/py/flwr/server/strategy/bulyan.py index a81406c255ad..84a261237ac5 100644 --- a/src/py/flwr/server/strategy/bulyan.py +++ b/src/py/flwr/server/strategy/bulyan.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union from flwr.common import ( FitRes, @@ -86,12 +86,12 @@ def __init__( num_malicious_clients: int = 0, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -125,9 +125,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using Bulyan.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/bulyan_test.py b/src/py/flwr/server/strategy/bulyan_test.py index 93a9ebda3783..c0b87c82a036 100644 --- a/src/py/flwr/server/strategy/bulyan_test.py +++ b/src/py/flwr/server/strategy/bulyan_test.py @@ -15,7 +15,6 @@ """Bulyan tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -62,7 +61,7 @@ def test_aggregate_fit() -> None: param_5: Parameters = ndarrays_to_parameters( [array([0.1, 0.1, 0.1, 0.1], dtype=float32)] ) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py index b25e1efdf0e9..77e70bb9af04 100644 --- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py +++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py @@ -20,7 +20,7 
@@ import math from logging import INFO, WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union import numpy as np @@ -156,14 +156,14 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" self.current_round_params = parameters_to_ndarrays(parameters) return self.strategy.configure_fit(server_round, parameters, client_manager) def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -172,9 +172,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results and update clip norms.""" if failures: return None, {} @@ -245,15 +245,15 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) @@ -372,7 +372,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {KEY_CLIPPING_NORM: self.clipping_norm} inner_strategy_config_result = self.strategy.configure_fit( @@ -385,7 +385,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -394,9 +394,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results and update clip norms.""" if failures: return None, {} @@ -432,7 +432,7 @@ 
def aggregate_fit( return aggregated_params, metrics - def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: + def _update_clip_norm(self, results: list[tuple[ClientProxy, FitRes]]) -> None: # Calculate the number of clients which set the norm indicator bit norm_bit_set_count = 0 for client_proxy, fit_res in results: @@ -457,14 +457,14 @@ def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/dp_fixed_clipping.py b/src/py/flwr/server/strategy/dp_fixed_clipping.py index 92b2845fd846..2ca253c96370 100644 --- a/src/py/flwr/server/strategy/dp_fixed_clipping.py +++ b/src/py/flwr/server/strategy/dp_fixed_clipping.py @@ -19,7 +19,7 @@ from logging import INFO, WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( EvaluateIns, @@ -117,14 +117,14 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" self.current_round_params = parameters_to_ndarrays(parameters) return self.strategy.configure_fit(server_round, parameters, client_manager) def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -133,9 +133,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Compute the updates, clip, and pass them for aggregation. Afterward, add noise to the aggregated parameters. 
@@ -191,15 +191,15 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) @@ -285,7 +285,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {KEY_CLIPPING_NORM: self.clipping_norm} inner_strategy_config_result = self.strategy.configure_fit( @@ -298,7 +298,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -307,9 +307,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Add noise to the aggregated parameters.""" if failures: return None, {} @@ -348,14 +348,14 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/dpfedavg_adaptive.py b/src/py/flwr/server/strategy/dpfedavg_adaptive.py index 423ddddeb379..ab513aba2269 100644 --- a/src/py/flwr/server/strategy/dpfedavg_adaptive.py +++ b/src/py/flwr/server/strategy/dpfedavg_adaptive.py @@ -19,7 +19,7 @@ import math -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union import numpy as np @@ -80,7 +80,7 @@ def __repr__(self) -> str: def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> 
List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {"dpfedavg_adaptive_clip_enabled": True} @@ -93,7 +93,7 @@ def configure_fit( return client_instructions - def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: + def _update_clip_norm(self, results: list[tuple[ClientProxy, FitRes]]) -> None: # Calculating number of clients which set the norm indicator bit norm_bit_set_count = 0 for client_proxy, fit_res in results: @@ -118,9 +118,9 @@ def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results as in DPFedAvgFixed and update clip norms.""" if failures: return None, {} diff --git a/src/py/flwr/server/strategy/dpfedavg_fixed.py b/src/py/flwr/server/strategy/dpfedavg_fixed.py index d122f0688922..4ea84db30cd4 100644 --- a/src/py/flwr/server/strategy/dpfedavg_fixed.py +++ b/src/py/flwr/server/strategy/dpfedavg_fixed.py @@ -17,7 +17,7 @@ Paper: arxiv.org/pdf/1710.06963.pdf """ -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.common.dp import add_gaussian_noise @@ -79,7 +79,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training incorporating Differential Privacy (DP). Configuration of the next training round includes information related to DP, @@ -119,7 +119,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation using the specified strategy. 
Parameters @@ -147,9 +147,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results using unweighted aggregation.""" if failures: return None, {} @@ -168,14 +168,14 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/fault_tolerant_fedavg.py b/src/py/flwr/server/strategy/fault_tolerant_fedavg.py index 663ac8872c39..60213db2efeb 100644 --- a/src/py/flwr/server/strategy/fault_tolerant_fedavg.py +++ b/src/py/flwr/server/strategy/fault_tolerant_fedavg.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( EvaluateRes, @@ -49,12 +49,12 @@ def __init__( min_available_clients: int = 1, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, min_completion_rate_fit: float = 0.5, min_completion_rate_evaluate: float = 0.5, initial_parameters: Optional[Parameters] = None, @@ -85,9 +85,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -117,9 +117,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses 
using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py b/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py index 98f4cac032cb..a01a3a5c0ad5 100644 --- a/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py +++ b/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py @@ -15,7 +15,7 @@ """FaultTolerantFedAvg tests.""" -from typing import List, Optional, Tuple, Union +from typing import Optional, Union from unittest.mock import MagicMock from flwr.common import ( @@ -36,8 +36,8 @@ def test_aggregate_fit_no_results_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.1) - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: Optional[Parameters] = None # Execute @@ -51,8 +51,8 @@ def test_aggregate_fit_no_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.1) - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [Exception()] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [Exception()] expected: Optional[Parameters] = None # Execute @@ -66,7 +66,7 @@ def test_aggregate_fit_not_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.5) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -77,7 +77,7 @@ def test_aggregate_fit_not_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [ Exception(), Exception(), ] @@ -94,7 +94,7 @@ def test_aggregate_fit_just_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.5) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -105,7 +105,7 @@ def test_aggregate_fit_just_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [Exception()] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [Exception()] expected: Optional[NDArrays] = [] # Execute @@ -120,7 +120,7 @@ def test_aggregate_fit_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.99) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -131,7 +131,7 @@ def test_aggregate_fit_no_failures() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: Optional[NDArrays] = [] # Execute @@ -146,8 +146,8 @@ def test_aggregate_evaluate_no_results_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.1) - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: 
list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] expected: Optional[float] = None # Execute @@ -161,8 +161,8 @@ def test_aggregate_evaluate_no_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.1) - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception() ] expected: Optional[float] = None @@ -178,7 +178,7 @@ def test_aggregate_evaluate_not_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.5) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -189,7 +189,7 @@ def test_aggregate_evaluate_not_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception(), Exception(), ] @@ -206,7 +206,7 @@ def test_aggregate_evaluate_just_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.5) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -217,7 +217,7 @@ def test_aggregate_evaluate_just_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception() ] expected: Optional[float] = 2.3 @@ -233,7 +233,7 @@ def test_aggregate_evaluate_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.99) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -244,7 +244,7 @@ def test_aggregate_evaluate_no_failures() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] expected: Optional[float] = 2.3 # Execute diff --git a/src/py/flwr/server/strategy/fedadagrad.py b/src/py/flwr/server/strategy/fedadagrad.py index f13c5358da25..75befdd0e796 100644 --- a/src/py/flwr/server/strategy/fedadagrad.py +++ b/src/py/flwr/server/strategy/fedadagrad.py @@ -20,7 +20,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -89,12 +89,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, accept_failures: bool = True, @@ -131,9 +131,9 @@ def __repr__(self) -> str: def 
aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/fedadagrad_test.py b/src/py/flwr/server/strategy/fedadagrad_test.py index b43a4c75d123..96d98fe750f3 100644 --- a/src/py/flwr/server/strategy/fedadagrad_test.py +++ b/src/py/flwr/server/strategy/fedadagrad_test.py @@ -15,7 +15,6 @@ """FedAdagrad tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -54,7 +53,7 @@ def test_aggregate_fit() -> None: bridge = MagicMock() client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( diff --git a/src/py/flwr/server/strategy/fedadam.py b/src/py/flwr/server/strategy/fedadam.py index dc90e90c7568..d0f87a43f79b 100644 --- a/src/py/flwr/server/strategy/fedadam.py +++ b/src/py/flwr/server/strategy/fedadam.py @@ -20,7 +20,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -93,12 +93,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -137,9 +137,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/fedavg.py b/src/py/flwr/server/strategy/fedavg.py index 3b9b2640c2b5..2d0b855c3186 100644 --- a/src/py/flwr/server/strategy/fedavg.py +++ b/src/py/flwr/server/strategy/fedavg.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( EvaluateIns, @@ -99,12 +99,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + 
[int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -138,12 +138,12 @@ def __repr__(self) -> str: rep = f"FedAvg(accept_failures={self.accept_failures})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients @@ -158,7 +158,7 @@ def initialize_parameters( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -172,7 +172,7 @@ def evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -193,7 +193,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction eval is 0. 
if self.fraction_evaluate == 0.0: @@ -220,9 +220,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -256,9 +256,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavg_android.py b/src/py/flwr/server/strategy/fedavg_android.py index 2f49cf8784c9..bcecf8efb504 100644 --- a/src/py/flwr/server/strategy/fedavg_android.py +++ b/src/py/flwr/server/strategy/fedavg_android.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast import numpy as np @@ -81,12 +81,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, ) -> None: @@ -107,12 +107,12 @@ def __repr__(self) -> str: rep = f"FedAvg(accept_failures={self.accept_failures})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients @@ -127,7 +127,7 @@ def initialize_parameters( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -141,7 +141,7 @@ def evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: 
"""Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -162,7 +162,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction_evaluate is 0 if self.fraction_evaluate == 0.0: @@ -189,9 +189,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -208,9 +208,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavg_test.py b/src/py/flwr/server/strategy/fedavg_test.py index e62eaa5c5832..66241c3ab66a 100644 --- a/src/py/flwr/server/strategy/fedavg_test.py +++ b/src/py/flwr/server/strategy/fedavg_test.py @@ -15,7 +15,7 @@ """FedAvg tests.""" -from typing import List, Tuple, Union +from typing import Union from unittest.mock import MagicMock import numpy as np @@ -140,7 +140,7 @@ def test_inplace_aggregate_fit_equivalence() -> None: weights1_0 = np.random.randn(100, 64) weights1_1 = np.random.randn(314, 628, 3) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -160,7 +160,7 @@ def test_inplace_aggregate_fit_equivalence() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] fedavg_reference = FedAvg(inplace=False) fedavg_inplace = FedAvg() diff --git a/src/py/flwr/server/strategy/fedavgm.py b/src/py/flwr/server/strategy/fedavgm.py index ab3d37249db6..a7c37c38770f 100644 --- a/src/py/flwr/server/strategy/fedavgm.py +++ b/src/py/flwr/server/strategy/fedavgm.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -84,12 +84,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, 
fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -132,9 +132,9 @@ def initialize_parameters( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavgm_test.py b/src/py/flwr/server/strategy/fedavgm_test.py index 39da5f4b82c4..400fa3c97247 100644 --- a/src/py/flwr/server/strategy/fedavgm_test.py +++ b/src/py/flwr/server/strategy/fedavgm_test.py @@ -15,7 +15,7 @@ """FedAvgM tests.""" -from typing import List, Tuple, Union +from typing import Union from unittest.mock import MagicMock from numpy import array, float32 @@ -41,7 +41,7 @@ def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None: array([0, 0, 0, 0], dtype=float32), ] - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -61,7 +61,7 @@ def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: NDArrays = [ array([[1, 2, 3], [4, 5, 6]], dtype=float32), array([7, 8, 9, 10], dtype=float32), @@ -94,7 +94,7 @@ def test_aggregate_fit_server_learning_rate_and_momentum() -> None: array([0, 0, 0, 0], dtype=float32), ] - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -114,7 +114,7 @@ def test_aggregate_fit_server_learning_rate_and_momentum() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: NDArrays = [ array([[1, 2, 3], [4, 5, 6]], dtype=float32), array([7, 8, 9, 10], dtype=float32), diff --git a/src/py/flwr/server/strategy/fedmedian.py b/src/py/flwr/server/strategy/fedmedian.py index e7cba5324fa8..35044d42b22c 100644 --- a/src/py/flwr/server/strategy/fedmedian.py +++ b/src/py/flwr/server/strategy/fedmedian.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( FitRes, @@ -46,9 +46,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using median.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedmedian_test.py b/src/py/flwr/server/strategy/fedmedian_test.py index 3960ad70b145..2c9881635319 100644 --- a/src/py/flwr/server/strategy/fedmedian_test.py +++ b/src/py/flwr/server/strategy/fedmedian_test.py @@ -15,7 +15,6 @@ """FedMedian tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -159,7 +158,7 @@ def test_aggregate_fit() -> None: client_0 = 
GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( diff --git a/src/py/flwr/server/strategy/fedopt.py b/src/py/flwr/server/strategy/fedopt.py index c581d4797123..3e143fc3ca59 100644 --- a/src/py/flwr/server/strategy/fedopt.py +++ b/src/py/flwr/server/strategy/fedopt.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, Optional, Tuple +from typing import Callable, Optional from flwr.common import ( MetricsAggregationFn, @@ -86,12 +86,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, diff --git a/src/py/flwr/server/strategy/fedprox.py b/src/py/flwr/server/strategy/fedprox.py index f15271e06060..218fece0491f 100644 --- a/src/py/flwr/server/strategy/fedprox.py +++ b/src/py/flwr/server/strategy/fedprox.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple +from typing import Callable, Optional from flwr.common import FitIns, MetricsAggregationFn, NDArrays, Parameters, Scalar from flwr.server.client_manager import ClientManager @@ -113,12 +113,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -148,7 +148,7 @@ def __repr__(self) -> str: def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training. 
Sends the proximal factor mu to the clients diff --git a/src/py/flwr/server/strategy/fedtrimmedavg.py b/src/py/flwr/server/strategy/fedtrimmedavg.py index 96b0d35e7a61..8a0e4e50fbff 100644 --- a/src/py/flwr/server/strategy/fedtrimmedavg.py +++ b/src/py/flwr/server/strategy/fedtrimmedavg.py @@ -17,7 +17,7 @@ Paper: arxiv.org/abs/1803.01498 """ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -78,12 +78,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -114,9 +114,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using trimmed average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedxgb_bagging.py b/src/py/flwr/server/strategy/fedxgb_bagging.py index a74ee81976a6..1e55466808f8 100644 --- a/src/py/flwr/server/strategy/fedxgb_bagging.py +++ b/src/py/flwr/server/strategy/fedxgb_bagging.py @@ -17,7 +17,7 @@ import json from logging import WARNING -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Callable, Optional, Union, cast from flwr.common import EvaluateRes, FitRes, Parameters, Scalar from flwr.common.logger import log @@ -34,8 +34,8 @@ def __init__( self, evaluate_function: Optional[ Callable[ - [int, Parameters, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, Parameters, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, **kwargs: Any, @@ -52,9 +52,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using bagging.""" if not results: return None, {} @@ -79,9 +79,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation metrics using average.""" if 
not results: return None, {} @@ -101,7 +101,7 @@ def aggregate_evaluate( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_function is None: # No evaluation function provided @@ -152,7 +152,7 @@ def aggregate( return bst_prev_bytes -def _get_tree_nums(xgb_model_org: bytes) -> Tuple[int, int]: +def _get_tree_nums(xgb_model_org: bytes) -> tuple[int, int]: xgb_model = json.loads(bytearray(xgb_model_org)) # Get the number of trees tree_num = int( diff --git a/src/py/flwr/server/strategy/fedxgb_cyclic.py b/src/py/flwr/server/strategy/fedxgb_cyclic.py index 75025a89728b..c2dc3d797c7e 100644 --- a/src/py/flwr/server/strategy/fedxgb_cyclic.py +++ b/src/py/flwr/server/strategy/fedxgb_cyclic.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Any, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Optional, Union, cast from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.common.logger import log @@ -45,9 +45,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using bagging.""" if not results: return None, {} @@ -69,9 +69,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation metrics using average.""" if not results: return None, {} @@ -91,7 +91,7 @@ def aggregate_evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -117,7 +117,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction eval is 0. 
if self.fraction_evaluate == 0.0: diff --git a/src/py/flwr/server/strategy/fedxgb_nn_avg.py b/src/py/flwr/server/strategy/fedxgb_nn_avg.py index 4562663287ae..a7da4a919af7 100644 --- a/src/py/flwr/server/strategy/fedxgb_nn_avg.py +++ b/src/py/flwr/server/strategy/fedxgb_nn_avg.py @@ -22,7 +22,7 @@ from logging import WARNING -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Optional, Union from flwr.common import FitRes, Scalar, ndarrays_to_parameters, parameters_to_ndarrays from flwr.common.logger import log, warn_deprecated_feature @@ -56,7 +56,7 @@ def __repr__(self) -> str: def evaluate( self, server_round: int, parameters: Any - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -70,9 +70,9 @@ def evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Any], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Any], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedyogi.py b/src/py/flwr/server/strategy/fedyogi.py index c7b2ebb51667..11873d1b781f 100644 --- a/src/py/flwr/server/strategy/fedyogi.py +++ b/src/py/flwr/server/strategy/fedyogi.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -93,12 +93,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -137,9 +137,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/krum.py b/src/py/flwr/server/strategy/krum.py index 074d018c35a3..5d33874b9789 100644 --- a/src/py/flwr/server/strategy/krum.py +++ b/src/py/flwr/server/strategy/krum.py @@ -21,7 +21,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -87,12 +87,12 @@ def __init__( num_clients_to_keep: int = 0, 
evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -123,9 +123,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using Krum.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/krum_test.py b/src/py/flwr/server/strategy/krum_test.py index b34982325b39..dc996b480630 100644 --- a/src/py/flwr/server/strategy/krum_test.py +++ b/src/py/flwr/server/strategy/krum_test.py @@ -15,7 +15,6 @@ """Krum tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -160,7 +159,7 @@ def test_aggregate_fit() -> None: client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( diff --git a/src/py/flwr/server/strategy/multikrum_test.py b/src/py/flwr/server/strategy/multikrum_test.py index 7a1a4c3ecf38..90607e2c0edc 100644 --- a/src/py/flwr/server/strategy/multikrum_test.py +++ b/src/py/flwr/server/strategy/multikrum_test.py @@ -15,7 +15,6 @@ """Krum tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -59,7 +58,7 @@ def test_aggregate_fit() -> None: client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( diff --git a/src/py/flwr/server/strategy/qfedavg.py b/src/py/flwr/server/strategy/qfedavg.py index 26a397d4cf8c..30a3cc53ee94 100644 --- a/src/py/flwr/server/strategy/qfedavg.py +++ b/src/py/flwr/server/strategy/qfedavg.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -60,12 +60,12 @@ def __init__( min_available_clients: int = 1, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: 
Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -95,19 +95,19 @@ def __repr__(self) -> str: rep += f"q_param={self.q_param}, pre_weights={self.pre_weights})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" weights = parameters_to_ndarrays(parameters) self.pre_weights = weights @@ -131,7 +131,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction_evaluate is 0 if self.fraction_evaluate == 0.0: @@ -158,9 +158,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -229,9 +229,9 @@ def norm_grad(grad_list: NDArrays) -> float: def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/strategy.py b/src/py/flwr/server/strategy/strategy.py index cfdfe2e246c5..14999e9a8993 100644 --- a/src/py/flwr/server/strategy/strategy.py +++ b/src/py/flwr/server/strategy/strategy.py @@ -16,7 +16,7 @@ from abc import ABC, abstractmethod -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.server.client_manager import ClientManager @@ -47,7 +47,7 @@ def initialize_parameters( @abstractmethod def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training. 
Parameters @@ -72,9 +72,9 @@ def configure_fit( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results. Parameters @@ -108,7 +108,7 @@ def aggregate_fit( @abstractmethod def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation. Parameters @@ -134,9 +134,9 @@ def configure_evaluate( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation results. Parameters @@ -164,7 +164,7 @@ def aggregate_evaluate( @abstractmethod def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate the current model parameters. This function can be used to perform centralized (i.e., server-side) evaluation diff --git a/src/py/flwr/server/superlink/driver/driver_grpc.py b/src/py/flwr/server/superlink/driver/driver_grpc.py index b7b914206f72..70354387812e 100644 --- a/src/py/flwr/server/superlink/driver/driver_grpc.py +++ b/src/py/flwr/server/superlink/driver/driver_grpc.py @@ -15,7 +15,7 @@ """Driver gRPC API.""" from logging import INFO -from typing import Optional, Tuple +from typing import Optional import grpc @@ -35,7 +35,7 @@ def run_driver_api_grpc( address: str, state_factory: StateFactory, ffs_factory: FfsFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], ) -> grpc.Server: """Run Driver API (gRPC, request-response).""" # Create Driver API gRPC server diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py index 73cd1c73a6fd..3cafb3e71f7c 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer.py @@ -17,7 +17,7 @@ import time from logging import DEBUG -from typing import List, Optional, Set +from typing import Optional from uuid import UUID import grpc @@ -32,8 +32,6 @@ from flwr.common.typing import Fab from flwr.proto import driver_pb2_grpc # pylint: disable=E0611 from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 - CreateRunRequest, - CreateRunResponse, GetNodesRequest, GetNodesResponse, PullTaskResRequest, @@ -44,6 +42,8 @@ from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + CreateRunRequest, + CreateRunResponse, GetRunRequest, GetRunResponse, Run, @@ -68,8 +68,8 @@ def GetNodes( """Get available nodes.""" log(DEBUG, "DriverServicer.GetNodes") state: State = self.state_factory.state() - all_ids: Set[int] = state.get_nodes(request.run_id) - nodes: 
List[Node] = [ + all_ids: set[int] = state.get_nodes(request.run_id) + nodes: list[Node] = [ Node(node_id=node_id, anonymous=False) for node_id in all_ids ] return GetNodesResponse(nodes=nodes) @@ -119,7 +119,7 @@ def PushTaskIns( state: State = self.state_factory.state() # Store each TaskIns - task_ids: List[Optional[UUID]] = [] + task_ids: list[Optional[UUID]] = [] for task_ins in request.task_ins_list: task_id: Optional[UUID] = state.store_task_ins(task_ins=task_ins) task_ids.append(task_id) @@ -135,7 +135,7 @@ def PullTaskRes( log(DEBUG, "DriverServicer.PullTaskRes") # Convert each task_id str to UUID - task_ids: Set[UUID] = {UUID(task_id) for task_id in request.task_ids} + task_ids: set[UUID] = {UUID(task_id) for task_id in request.task_ids} # Init state state: State = self.state_factory.state() @@ -155,7 +155,7 @@ def on_rpc_done() -> None: context.add_callback(on_rpc_done) # Read from state - task_res_list: List[TaskRes] = state.get_task_res(task_ids=task_ids, limit=None) + task_res_list: list[TaskRes] = state.get_task_res(task_ids=task_ids, limit=None) context.set_code(grpc.StatusCode.OK) return PullTaskResResponse(task_res_list=task_res_list) diff --git a/src/py/flwr/server/superlink/ffs/disk_ffs.py b/src/py/flwr/server/superlink/ffs/disk_ffs.py index 98ec4f93498f..4f1ab05be9a2 100644 --- a/src/py/flwr/server/superlink/ffs/disk_ffs.py +++ b/src/py/flwr/server/superlink/ffs/disk_ffs.py @@ -17,7 +17,7 @@ import hashlib import json from pathlib import Path -from typing import Dict, List, Optional, Tuple +from typing import Optional from flwr.server.superlink.ffs.ffs import Ffs @@ -35,7 +35,7 @@ def __init__(self, base_dir: str) -> None: """ self.base_dir = Path(base_dir) - def put(self, content: bytes, meta: Dict[str, str]) -> str: + def put(self, content: bytes, meta: dict[str, str]) -> str: """Store bytes and metadata and return key (hash of content). Parameters @@ -58,7 +58,7 @@ def put(self, content: bytes, meta: Dict[str, str]) -> str: return content_hash - def get(self, key: str) -> Optional[Tuple[bytes, Dict[str, str]]]: + def get(self, key: str) -> Optional[tuple[bytes, dict[str, str]]]: """Return tuple containing the object content and metadata. Parameters @@ -90,7 +90,7 @@ def delete(self, key: str) -> None: (self.base_dir / key).unlink() (self.base_dir / f"{key}.META").unlink() - def list(self) -> List[str]: + def list(self) -> list[str]: """List all keys. Return all available keys in this `Ffs` instance. diff --git a/src/py/flwr/server/superlink/ffs/ffs.py b/src/py/flwr/server/superlink/ffs/ffs.py index fab3b1fdfb3e..b1d26e74c157 100644 --- a/src/py/flwr/server/superlink/ffs/ffs.py +++ b/src/py/flwr/server/superlink/ffs/ffs.py @@ -16,14 +16,14 @@ import abc -from typing import Dict, List, Optional, Tuple +from typing import Optional class Ffs(abc.ABC): # pylint: disable=R0904 """Abstract Flower File Storage interface for large objects.""" @abc.abstractmethod - def put(self, content: bytes, meta: Dict[str, str]) -> str: + def put(self, content: bytes, meta: dict[str, str]) -> str: """Store bytes and metadata and return sha256hex hash of data as str. Parameters @@ -40,7 +40,7 @@ def put(self, content: bytes, meta: Dict[str, str]) -> str: """ @abc.abstractmethod - def get(self, key: str) -> Optional[Tuple[bytes, Dict[str, str]]]: + def get(self, key: str) -> Optional[tuple[bytes, dict[str, str]]]: """Return tuple containing the object content and metadata. 
Parameters @@ -65,7 +65,7 @@ def delete(self, key: str) -> None: """ @abc.abstractmethod - def list(self) -> List[str]: + def list(self) -> list[str]: """List keys of all stored objects. Return all available keys in this `Ffs` instance. diff --git a/src/py/flwr/server/superlink/ffs/ffs_test.py b/src/py/flwr/server/superlink/ffs/ffs_test.py index f7fbbf1218e1..5cf28cfd2cbe 100644 --- a/src/py/flwr/server/superlink/ffs/ffs_test.py +++ b/src/py/flwr/server/superlink/ffs/ffs_test.py @@ -21,7 +21,6 @@ import tempfile import unittest from abc import abstractmethod -from typing import Dict from flwr.server.superlink.ffs import DiskFfs, Ffs @@ -65,7 +64,7 @@ def test_get(self) -> None: ffs: Ffs = self.ffs_factory() content_expected = b"content" hash_expected = hashlib.sha256(content_expected).hexdigest() - meta_expected: Dict[str, str] = {"meta_key": "meta_value"} + meta_expected: dict[str, str] = {"meta_key": "meta_value"} with open(os.path.join(self.tmp_dir.name, hash_expected), "wb") as file: file.write(content_expected) @@ -93,7 +92,7 @@ def test_delete(self) -> None: ffs: Ffs = self.ffs_factory() content_expected = b"content" hash_expected = hashlib.sha256(content_expected).hexdigest() - meta_expected: Dict[str, str] = {"meta_key": "meta_value"} + meta_expected: dict[str, str] = {"meta_key": "meta_value"} with open(os.path.join(self.tmp_dir.name, hash_expected), "wb") as file: file.write(content_expected) @@ -117,7 +116,7 @@ def test_list(self) -> None: ffs: Ffs = self.ffs_factory() content_expected = b"content" hash_expected = hashlib.sha256(content_expected).hexdigest() - meta_expected: Dict[str, str] = {"meta_key": "meta_value"} + meta_expected: dict[str, str] = {"meta_key": "meta_value"} with open(os.path.join(self.tmp_dir.name, hash_expected), "wb") as file: file.write(content_expected) diff --git a/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py index 278e20eb1d69..dbfbb236a7e4 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py @@ -16,7 +16,7 @@ from logging import DEBUG, INFO -from typing import Callable, Type, TypeVar +from typing import Callable, TypeVar import grpc from google.protobuf.message import Message as GrpcMessage @@ -47,7 +47,7 @@ def _handle( msg_container: MessageContainer, - request_type: Type[T], + request_type: type[T], handler: Callable[[T], GrpcMessage], ) -> MessageContainer: req = request_type.FromString(msg_container.grpc_message_content) diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py index 79f1a8f9902b..38f0dfdae299 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py @@ -19,7 +19,8 @@ """ import uuid -from typing import Callable, Iterator +from collections.abc import Iterator +from typing import Callable import grpc from iterators import TimeoutIterator diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py index 5fe0396696ab..476e2914f4d9 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py @@ -15,10 +15,11 @@ """Provides class GrpcBridge.""" +from collections.abc 
import Iterator from dataclasses import dataclass from enum import Enum from threading import Condition -from typing import Iterator, Optional +from typing import Optional from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 ClientMessage, diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py index f9b6b97030f0..6d9e081d8dd4 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py @@ -17,7 +17,7 @@ import time from threading import Thread -from typing import List, Union +from typing import Union from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 ClientMessage, @@ -32,7 +32,7 @@ def start_worker( - rounds: int, bridge: GrpcBridge, results: List[ClientMessage] + rounds: int, bridge: GrpcBridge, results: list[ClientMessage] ) -> Thread: """Simulate processing loop with five calls.""" @@ -59,7 +59,7 @@ def test_workflow_successful() -> None: """Test full workflow.""" # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() @@ -90,7 +90,7 @@ def test_workflow_close() -> None: """ # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() @@ -135,7 +135,7 @@ def test_ins_wrapper_iterator_close_while_blocking() -> None: """ # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py index dd78acb72fb1..b161492000f2 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py @@ -17,8 +17,9 @@ import concurrent.futures import sys +from collections.abc import Sequence from logging import ERROR -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Optional, Union import grpc @@ -46,7 +47,7 @@ AddServicerToServerFn = Callable[..., Any] -def valid_certificates(certificates: Tuple[bytes, bytes, bytes]) -> bool: +def valid_certificates(certificates: tuple[bytes, bytes, bytes]) -> bool: """Validate certificates tuple.""" is_valid = ( all(isinstance(certificate, bytes) for certificate in certificates) @@ -65,7 +66,7 @@ def start_grpc_server( # pylint: disable=too-many-arguments max_concurrent_workers: int = 1000, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, ) -> grpc.Server: """Create and start a gRPC server running FlowerServiceServicer. 
@@ -157,16 +158,16 @@ def start_grpc_server( # pylint: disable=too-many-arguments def generic_create_grpc_server( # pylint: disable=too-many-arguments servicer_and_add_fn: Union[ - Tuple[FleetServicer, AddServicerToServerFn], - Tuple[GrpcAdapterServicer, AddServicerToServerFn], - Tuple[FlowerServiceServicer, AddServicerToServerFn], - Tuple[DriverServicer, AddServicerToServerFn], + tuple[FleetServicer, AddServicerToServerFn], + tuple[GrpcAdapterServicer, AddServicerToServerFn], + tuple[FlowerServiceServicer, AddServicerToServerFn], + tuple[DriverServicer, AddServicerToServerFn], ], server_address: str, max_concurrent_workers: int = 1000, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Create a gRPC server with a single servicer. diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py index 7ff730b17afa..9635993e0ad5 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py @@ -20,7 +20,7 @@ from contextlib import closing from os.path import abspath, dirname, join from pathlib import Path -from typing import Tuple, cast +from typing import cast from flwr.server.client_manager import SimpleClientManager from flwr.server.superlink.fleet.grpc_bidi.grpc_server import ( @@ -31,7 +31,7 @@ root_dir = dirname(abspath(join(__file__, "../../../../../../.."))) -def load_certificates() -> Tuple[str, str, str]: +def load_certificates() -> tuple[str, str, str]: """Generate and load SSL credentials/certificates. Utility function for loading for SSL-enabled gRPC servertests. 
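Note on the pattern in these hunks: the type-annotation changes above and below are mechanical. `typing.List`, `typing.Tuple`, `typing.Dict`, `typing.Set` and `typing.Type` are replaced by the built-in generics standardised in PEP 585 (usable at runtime on Python 3.9 and newer), and abstract container types such as `Iterator`, `Sequence` and `Awaitable` are imported from `collections.abc` instead of `typing`. A minimal sketch of the resulting annotation style; all names below are illustrative and not taken from this patch:

    from collections.abc import Iterator, Sequence
    from typing import Optional, Union


    def aggregate_pairs(
        results: list[tuple[str, float]],
        failures: list[Union[tuple[str, float], BaseException]],
    ) -> tuple[Optional[float], dict[str, float]]:
        """Average the values of (key, value) pairs and report the failure count."""
        if not results:
            return None, {}
        values = [value for _, value in results]
        return sum(values) / len(values), {"num_failures": float(len(failures))}


    def iter_keys(pairs: Sequence[tuple[str, float]]) -> Iterator[str]:
        """Yield only the keys, typed with collections.abc generics."""
        for key, _ in pairs:
            yield key

In the interceptor hunks that follow, the HMAC is computed over `request.SerializeToString(deterministic=True)`; spelling out the keyword makes explicit that signer and verifier must hash the same deterministic byte encoding of the request. Below is a simplified, illustrative stand-in for the `compute_hmac`/`verify_hmac` helpers imported from `flwr.common.secure_aggregation.crypto.symmetric_encryption`; the real implementations may differ:

    import hashlib
    import hmac


    def compute_hmac(key: bytes, message_bytes: bytes) -> bytes:
        """Return an HMAC-SHA256 tag over the given bytes."""
        return hmac.new(key, message_bytes, hashlib.sha256).digest()


    def verify_hmac(key: bytes, message_bytes: bytes, hmac_value: bytes) -> bool:
        """Check a tag in constant time."""
        return hmac.compare_digest(compute_hmac(key, message_bytes), hmac_value)

    # With a protobuf request one would hash, for example:
    #   message_bytes = request.SerializeToString(deterministic=True)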
diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py index 70b38f8b625e..d836a74bef2e 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py @@ -16,8 +16,9 @@ import base64 +from collections.abc import Sequence from logging import INFO, WARNING -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Optional, Union import grpc from cryptography.hazmat.primitives.asymmetric import ec @@ -68,7 +69,7 @@ def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, value in tuples if key == key_string), "") if isinstance(value, str): @@ -188,7 +189,8 @@ def _verify_hmac( self, public_key: ec.EllipticCurvePublicKey, request: Request, hmac_value: bytes ) -> bool: shared_secret = generate_shared_key(self.server_private_key, public_key) - return verify_hmac(shared_secret, request.SerializeToString(True), hmac_value) + message_bytes = request.SerializeToString(deterministic=True) + return verify_hmac(shared_secret, message_bytes, hmac_value) def _create_authenticated_node( self, diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py index 74914be68a8f..cf7e05f0fb00 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py @@ -20,6 +20,7 @@ import grpc +from flwr.common.constant import FLEET_API_GRPC_RERE_DEFAULT_ADDRESS from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( compute_hmac, generate_key_pairs, @@ -42,7 +43,7 @@ from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from flwr.server.app import ADDRESS_FLEET_API_GRPC_RERE, _run_fleet_api_grpc_rere +from flwr.server.app import _run_fleet_api_grpc_rere from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.state.state_factory import StateFactory @@ -73,7 +74,7 @@ def setUp(self) -> None: self._server_interceptor = AuthenticateServerInterceptor(self.state) self._server: grpc.Server = _run_fleet_api_grpc_rere( - ADDRESS_FLEET_API_GRPC_RERE, + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, state_factory, ffs_factory, None, @@ -166,7 +167,7 @@ def test_successful_delete_node_with_metadata(self) -> None: self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -195,7 +196,7 @@ def test_unsuccessful_delete_node_with_metadata(self) -> None: node_private_key, _ = generate_key_pairs() shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( 
public_key_to_bytes(self._node_public_key) @@ -222,7 +223,7 @@ def test_successful_pull_task_ins_with_metadata(self) -> None: self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -251,7 +252,7 @@ def test_unsuccessful_pull_task_ins_with_metadata(self) -> None: node_private_key, _ = generate_key_pairs() shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -280,7 +281,7 @@ def test_successful_push_task_res_with_metadata(self) -> None: self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -311,7 +312,7 @@ def test_unsuccessful_push_task_res_with_metadata(self) -> None: node_private_key, _ = generate_key_pairs() shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -339,7 +340,7 @@ def test_successful_get_run_with_metadata(self) -> None: self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -369,7 +370,7 @@ def test_unsuccessful_get_run_with_metadata(self) -> None: node_private_key, _ = generate_key_pairs() shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -396,7 +397,7 @@ def test_successful_ping_with_metadata(self) -> None: self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -425,7 +426,7 @@ def test_unsuccessful_ping_with_metadata(self) -> None: node_private_key, _ = generate_key_pairs() shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) @@ -469,7 +470,7 @@ def 
test_successful_restore_node(self) -> None: self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( public_key_to_bytes(self._node_public_key) diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 64f9ac609998..85f3fa34e0ac 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -16,7 +16,7 @@ import time -from typing import List, Optional +from typing import Optional from uuid import UUID from flwr.common.serde import fab_to_proto, user_config_to_proto @@ -83,7 +83,7 @@ def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsRespo node_id: Optional[int] = None if node.anonymous else node.node_id # Retrieve TaskIns from State - task_ins_list: List[TaskIns] = state.get_task_ins(node_id=node_id, limit=1) + task_ins_list: list[TaskIns] = state.get_task_ins(node_id=node_id, limit=1) # Build response response = PullTaskInsResponse( diff --git a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index cf5ad16f7999..a988252b3ea2 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -18,7 +18,8 @@ from __future__ import annotations import sys -from typing import Awaitable, Callable, TypeVar +from collections.abc import Awaitable +from typing import Callable, TypeVar from google.protobuf.message import Message as GrpcMessage diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py b/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py index a8c671810a51..31129fce1b1b 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py @@ -15,17 +15,16 @@ """Simulation Engine Backends.""" import importlib -from typing import Dict, Type from .backend import Backend, BackendConfig is_ray_installed = importlib.util.find_spec("ray") is not None # Mapping of supported backends -supported_backends: Dict[str, Type[Backend]] = {} +supported_backends: dict[str, type[Backend]] = {} # To log backend-specific error message when chosen backend isn't available -error_messages_backends: Dict[str, str] = {} +error_messages_backends: dict[str, str] = {} if is_ray_installed: from .raybackend import RayBackend diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/backend.py b/src/py/flwr/server/superlink/fleet/vce/backend/backend.py index 89341c0d238f..38be6032e3a5 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/backend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/backend.py @@ -16,14 +16,14 @@ from abc import ABC, abstractmethod -from typing import Callable, Dict, Tuple +from typing import Callable from flwr.client.client_app import ClientApp from flwr.common.context import Context from flwr.common.message import Message from flwr.common.typing import ConfigsRecordValues -BackendConfig = Dict[str, Dict[str, ConfigsRecordValues]] +BackendConfig = dict[str, dict[str, ConfigsRecordValues]] class Backend(ABC): @@ -62,5 +62,5 @@ def process_message( self, message: Message, context: Context, - ) -> Tuple[Message, 
Context]: + ) -> tuple[Message, Context]: """Submit a job to the backend.""" diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py index acfb248a6366..dd79d2ef7f62 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py @@ -16,7 +16,7 @@ import sys from logging import DEBUG, ERROR -from typing import Callable, Dict, Optional, Tuple, Union +from typing import Callable, Optional, Union import ray @@ -31,8 +31,8 @@ from .backend import Backend, BackendConfig -ClientResourcesDict = Dict[str, Union[int, float]] -ActorArgsDict = Dict[str, Union[int, float, Callable[[], None]]] +ClientResourcesDict = dict[str, Union[int, float]] +ActorArgsDict = dict[str, Union[int, float, Callable[[], None]]] class RayBackend(Backend): @@ -52,16 +52,11 @@ def __init__( # Validate client resources self.client_resources_key = "client_resources" - client_resources = self._validate_client_resources(config=backend_config) + self.client_resources = self._validate_client_resources(config=backend_config) - # Create actor pool - actor_kwargs = self._validate_actor_arguments(config=backend_config) - - self.pool = BasicActorPool( - actor_type=ClientAppActor, - client_resources=client_resources, - actor_kwargs=actor_kwargs, - ) + # Valide actor resources + self.actor_kwargs = self._validate_actor_arguments(config=backend_config) + self.pool: Optional[BasicActorPool] = None self.app_fn: Optional[Callable[[], ClientApp]] = None @@ -106,7 +101,7 @@ def _validate_actor_arguments(self, config: BackendConfig) -> ActorArgsDict: def init_ray(self, backend_config: BackendConfig) -> None: """Intialises Ray if not already initialised.""" if not ray.is_initialized(): - ray_init_args: Dict[ + ray_init_args: dict[ str, ConfigsRecordValues, ] = {} @@ -122,14 +117,24 @@ def init_ray(self, backend_config: BackendConfig) -> None: @property def num_workers(self) -> int: """Return number of actors in pool.""" - return self.pool.num_actors + return self.pool.num_actors if self.pool else 0 def is_worker_idle(self) -> bool: """Report whether the pool has idle actors.""" - return self.pool.is_actor_available() + return self.pool.is_actor_available() if self.pool else False def build(self, app_fn: Callable[[], ClientApp]) -> None: """Build pool of Ray actors that this backend will submit jobs to.""" + # Create Actor Pool + try: + self.pool = BasicActorPool( + actor_type=ClientAppActor, + client_resources=self.client_resources, + actor_kwargs=self.actor_kwargs, + ) + except Exception as ex: + raise ex + self.pool.add_actors_to_pool(self.pool.actors_capacity) # Set ClientApp callable that ray actors will use self.app_fn = app_fn @@ -139,13 +144,16 @@ def process_message( self, message: Message, context: Context, - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Run ClientApp that process a given message. Return output message and updated context. """ partition_id = context.node_config[PARTITION_ID_KEY] + if self.pool is None: + raise ValueError("The actor pool is empty, unfit to process messages.") + if self.app_fn is None: raise ValueError( "Unspecified function to load a `ClientApp`. 
" @@ -179,6 +187,7 @@ def process_message( def terminate(self) -> None: """Terminate all actors in actor pool.""" - self.pool.terminate_all_actors() + if self.pool: + self.pool.terminate_all_actors() ray.shutdown() log(DEBUG, "Terminated %s", self.__class__.__name__) diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py index cdb11401c29c..1cbdc230c938 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py @@ -15,7 +15,7 @@ """Test for Ray backend for the Fleet API using the Simulation Engine.""" from math import pi -from typing import Callable, Dict, Optional, Tuple, Union +from typing import Callable, Optional, Union from unittest import TestCase import ray @@ -47,7 +47,7 @@ class DummyClient(NumPyClient): def __init__(self, state: RecordSet) -> None: self.client_state = state - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple calculation.""" result = float(config["factor"]) * pi @@ -69,8 +69,8 @@ def _load_app() -> ClientApp: def backend_build_process_and_termination( backend: RayBackend, app_fn: Callable[[], ClientApp], - process_args: Optional[Tuple[Message, Context]] = None, -) -> Union[Tuple[Message, Context], None]: + process_args: Optional[tuple[Message, Context]] = None, +) -> Union[tuple[Message, Context], None]: """Build, process job and terminate RayBackend.""" backend.build(app_fn) to_return = None @@ -83,7 +83,7 @@ def backend_build_process_and_termination( return to_return -def _create_message_and_context() -> Tuple[Message, Context, float]: +def _create_message_and_context() -> tuple[Message, Context, float]: # Construct a Message mult_factor = 2024 diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index 165c2de73c21..8f4e18e14e28 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -24,7 +24,7 @@ from pathlib import Path from queue import Empty, Queue from time import sleep -from typing import Callable, Dict, Optional +from typing import Callable, Optional from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.client.clientapp.utils import get_load_client_app_fn @@ -44,7 +44,7 @@ from .backend import Backend, error_messages_backends, supported_backends -NodeToPartitionMapping = Dict[int, int] +NodeToPartitionMapping = dict[int, int] def _register_nodes( @@ -64,9 +64,9 @@ def _register_node_states( nodes_mapping: NodeToPartitionMapping, run: Run, app_dir: Optional[str] = None, -) -> Dict[int, NodeState]: +) -> dict[int, NodeState]: """Create NodeState objects and pre-register the context for the run.""" - node_states: Dict[int, NodeState] = {} + node_states: dict[int, NodeState] = {} num_partitions = len(set(nodes_mapping.values())) for node_id, partition_id in nodes_mapping.items(): node_states[node_id] = NodeState( @@ -89,7 +89,7 @@ def _register_node_states( def worker( taskins_queue: "Queue[TaskIns]", taskres_queue: "Queue[TaskRes]", - node_states: Dict[int, NodeState], + node_states: dict[int, NodeState], backend: Backend, f_stop: threading.Event, ) -> None: @@ -177,7 +177,7 @@ def run_api( backend_fn: Callable[[], Backend], nodes_mapping: NodeToPartitionMapping, state_factory: StateFactory, - 
node_states: Dict[int, NodeState], + node_states: dict[int, NodeState], f_stop: threading.Event, ) -> None: """Run the VCE.""" diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 76e8ac9156d2..1cc3a8f128b6 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -22,7 +22,7 @@ from math import pi from pathlib import Path from time import sleep -from typing import Dict, Optional, Set, Tuple +from typing import Optional from unittest import TestCase from uuid import UUID @@ -57,7 +57,7 @@ class DummyClient(NumPyClient): def __init__(self, state: RecordSet) -> None: self.client_state = state - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple calculation.""" result = float(config["factor"]) * pi @@ -86,7 +86,7 @@ def terminate_simulation(f_stop: threading.Event, sleep_duration: int) -> None: def init_state_factory_nodes_mapping( num_nodes: int, num_messages: int, -) -> Tuple[StateFactory, NodeToPartitionMapping, Dict[UUID, float]]: +) -> tuple[StateFactory, NodeToPartitionMapping, dict[UUID, float]]: """Instatiate StateFactory, register nodes and pre-insert messages in the state.""" # Register a state and a run_id in it run_id = 1234 @@ -110,7 +110,7 @@ def register_messages_into_state( nodes_mapping: NodeToPartitionMapping, run_id: int, num_messages: int, -) -> Dict[UUID, float]: +) -> dict[UUID, float]: """Register `num_messages` into the state factory.""" state: InMemoryState = state_factory.state() # type: ignore state.run_ids[run_id] = Run( @@ -123,7 +123,7 @@ def register_messages_into_state( # Artificially add TaskIns to state so they can be processed # by the Simulation Engine logic nodes_cycle = cycle(nodes_mapping.keys()) # we have more messages than supernodes - task_ids: Set[UUID] = set() # so we can retrieve them later + task_ids: set[UUID] = set() # so we can retrieve them later expected_results = {} for i in range(num_messages): dst_node_id = next(nodes_cycle) diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index c87ba86e47e7..e34d15374350 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -18,7 +18,7 @@ import threading import time from logging import ERROR -from typing import Dict, List, Optional, Set, Tuple +from typing import Optional from uuid import UUID, uuid4 from flwr.common import log, now @@ -37,15 +37,15 @@ class InMemoryState(State): # pylint: disable=R0902,R0904 def __init__(self) -> None: # Map node_id to (online_until, ping_interval) - self.node_ids: Dict[int, Tuple[float, float]] = {} - self.public_key_to_node_id: Dict[bytes, int] = {} + self.node_ids: dict[int, tuple[float, float]] = {} + self.public_key_to_node_id: dict[bytes, int] = {} # Map run_id to (fab_id, fab_version) - self.run_ids: Dict[int, Run] = {} - self.task_ins_store: Dict[UUID, TaskIns] = {} - self.task_res_store: Dict[UUID, TaskRes] = {} + self.run_ids: dict[int, Run] = {} + self.task_ins_store: dict[UUID, TaskIns] = {} + self.task_res_store: dict[UUID, TaskRes] = {} - self.node_public_keys: Set[bytes] = set() + self.node_public_keys: set[bytes] = set() self.server_public_key: Optional[bytes] = None self.server_private_key: Optional[bytes] = None @@ -76,13 +76,13 @@ def 
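The `worker(...)` signature in the `vce_api.py` hunk above takes a TaskIns queue, a TaskRes queue, and a stop event. A simplified, runnable version of that consumer loop; the integer-doubling body stands in for running a `ClientApp` on a message:

import threading
from queue import Empty, Queue


def worker(in_queue: "Queue[int]", out_queue: "Queue[int]", f_stop: threading.Event) -> None:
    # Keep pulling work until the stop event is set.
    while not f_stop.is_set():
        try:
            item = in_queue.get(timeout=1.0)
        except Empty:
            continue
        out_queue.put(item * 2)  # stand-in for processing a message


in_q: "Queue[int]" = Queue()
out_q: "Queue[int]" = Queue()
stop = threading.Event()
t = threading.Thread(target=worker, args=(in_q, out_q, stop))
t.start()
in_q.put(21)
print(out_q.get())  # 42
stop.set()
t.join()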
store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get all TaskIns that have not been delivered yet.""" if limit is not None and limit < 1: raise AssertionError("`limit` must be >= 1") # Find TaskIns for node_id that were not delivered yet - task_ins_list: List[TaskIns] = [] + task_ins_list: list[TaskIns] = [] with self.lock: for _, task_ins in self.task_ins_store.items(): # pylint: disable=too-many-boolean-expressions @@ -133,15 +133,15 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # Return the new task_id return task_id - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRes]: """Get all TaskRes that have not been delivered yet.""" if limit is not None and limit < 1: raise AssertionError("`limit` must be >= 1") with self.lock: # Find TaskRes that were not delivered yet - task_res_list: List[TaskRes] = [] - replied_task_ids: Set[UUID] = set() + task_res_list: list[TaskRes] = [] + replied_task_ids: set[UUID] = set() for _, task_res in self.task_res_store.items(): reply_to = UUID(task_res.task.ancestry[0]) if reply_to in task_ids and task_res.task.delivered_at == "": @@ -175,10 +175,10 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Return TaskRes return task_res_list - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" - task_ins_to_be_deleted: Set[UUID] = set() - task_res_to_be_deleted: Set[UUID] = set() + task_ins_to_be_deleted: set[UUID] = set() + task_res_to_be_deleted: set[UUID] = set() with self.lock: for task_ins_id in task_ids: @@ -253,7 +253,7 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: del self.node_ids[node_id] - def get_nodes(self, run_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> set[int]: """Return all available nodes. 
Constraints @@ -318,7 +318,7 @@ def get_server_public_key(self) -> Optional[bytes]: """Retrieve `server_public_key` in urlsafe bytes.""" return self.server_public_key - def store_node_public_keys(self, public_keys: Set[bytes]) -> None: + def store_node_public_keys(self, public_keys: set[bytes]) -> None: """Store a set of `node_public_keys` in state.""" with self.lock: self.node_public_keys = public_keys @@ -328,7 +328,7 @@ def store_node_public_key(self, public_key: bytes) -> None: with self.lock: self.node_public_keys.add(public_key) - def get_node_public_keys(self) -> Set[bytes]: + def get_node_public_keys(self) -> set[bytes]: """Retrieve all currently stored `node_public_keys` as a set.""" return self.node_public_keys diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index daa211560912..28d957a90bd3 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -19,8 +19,9 @@ import re import sqlite3 import time +from collections.abc import Sequence from logging import DEBUG, ERROR -from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union, cast +from typing import Any, Optional, Union, cast from uuid import UUID, uuid4 from flwr.common import log, now @@ -32,7 +33,14 @@ from flwr.server.utils.validator import validate_task_ins_or_res from .state import State -from .utils import generate_rand_int_from_bytes, make_node_unavailable_taskres +from .utils import ( + convert_sint64_to_uint64, + convert_sint64_values_in_dict_to_uint64, + convert_uint64_to_sint64, + convert_uint64_values_in_dict_to_sint64, + generate_rand_int_from_bytes, + make_node_unavailable_taskres, +) SQL_CREATE_TABLE_NODE = """ CREATE TABLE IF NOT EXISTS node( @@ -110,7 +118,7 @@ ); """ -DictOrTuple = Union[Tuple[Any, ...], Dict[str, Any]] +DictOrTuple = Union[tuple[Any, ...], dict[str, Any]] class SqliteState(State): # pylint: disable=R0904 @@ -131,7 +139,7 @@ def __init__( self.database_path = database_path self.conn: Optional[sqlite3.Connection] = None - def initialize(self, log_queries: bool = False) -> List[Tuple[str]]: + def initialize(self, log_queries: bool = False) -> list[tuple[str]]: """Create tables if they don't exist yet. Parameters @@ -162,7 +170,7 @@ def query( self, query: str, data: Optional[Union[Sequence[DictOrTuple], DictOrTuple]] = None, - ) -> List[Dict[str, Any]]: + ) -> list[dict[str, Any]]: """Execute a SQL query.""" if self.conn is None: raise AttributeError("State is not initialized.") @@ -222,6 +230,12 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: # Store TaskIns task_ins.task_id = str(task_id) data = (task_ins_to_dict(task_ins),) + + # Convert values from uint64 to sint64 for SQLite + convert_uint64_values_in_dict_to_sint64( + data[0], ["run_id", "producer_node_id", "consumer_node_id"] + ) + columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" @@ -237,7 +251,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get undelivered TaskIns for one node (either anonymous or with ID). 
Usually, the Fleet API calls this for Nodes planning to work on one or more @@ -271,7 +285,7 @@ def get_task_ins( ) raise AssertionError(msg) - data: Dict[str, Union[str, int]] = {} + data: dict[str, Union[str, int]] = {} if node_id is None: # Retrieve all anonymous Tasks @@ -283,6 +297,9 @@ def get_task_ins( AND delivered_at = "" """ else: + # Convert the uint64 value to sint64 for SQLite + data["node_id"] = convert_uint64_to_sint64(node_id) + # Retrieve all TaskIns for node_id query = """ SELECT task_id @@ -291,7 +308,6 @@ def get_task_ins( AND consumer_node_id == :node_id AND delivered_at = "" """ - data["node_id"] = node_id if limit is not None: query += " LIMIT :limit" @@ -321,6 +337,12 @@ def get_task_ins( # Run query rows = self.query(query, data) + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + result = [dict_to_task_ins(row) for row in rows] return result @@ -353,6 +375,12 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # Store TaskIns task_res.task_id = str(task_id) data = (task_res_to_dict(task_res),) + + # Convert values from uint64 to sint64 for SQLite + convert_uint64_values_in_dict_to_sint64( + data[0], ["run_id", "producer_node_id", "consumer_node_id"] + ) + columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" @@ -367,7 +395,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: return task_id # pylint: disable-next=R0914 - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRes]: """Get TaskRes for task_ids. Usually, the Driver API calls this method to get results for instructions it has @@ -397,7 +425,7 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe AND delivered_at = "" """ - data: Dict[str, Union[str, float, int]] = {} + data: dict[str, Union[str, float, int]] = {} if limit is not None: query += " LIMIT :limit" @@ -430,12 +458,18 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Run query rows = self.query(query, data) + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + result = [dict_to_task_res(row) for row in rows] # 1. 
Query: Fetch consumer_node_id of remaining task_ids # Assume the ancestry field only contains one element data.clear() - replied_task_ids: Set[UUID] = {UUID(str(row["ancestry"])) for row in rows} + replied_task_ids: set[UUID] = {UUID(str(row["ancestry"])) for row in rows} remaining_task_ids = task_ids - replied_task_ids placeholders = ",".join([f":id_{i}" for i in range(len(remaining_task_ids))]) query = f""" @@ -473,6 +507,13 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe for row in task_ins_rows: if limit and len(result) == limit: break + + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + task_ins = dict_to_task_ins(row) err_taskres = make_node_unavailable_taskres( ref_taskins=task_ins, @@ -499,10 +540,10 @@ def num_task_res(self) -> int: """ query = "SELECT count(*) AS num FROM task_res;" rows = self.query(query) - result: Dict[str, int] = rows[0] + result: dict[str, int] = rows[0] return result["num"] - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" ids = list(task_ids) if len(ids) == 0: @@ -543,8 +584,11 @@ def create_node( self, ping_interval: float, public_key: Optional[bytes] = None ) -> int: """Create, store in state, and return `node_id`.""" - # Sample a random int64 as node_id - node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) + # Sample a random uint64 as node_id + uint64_node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) + + # Convert the uint64 value to sint64 for SQLite + sint64_node_id = convert_uint64_to_sint64(uint64_node_id) query = "SELECT node_id FROM node WHERE public_key = :public_key;" row = self.query(query, {"public_key": public_key}) @@ -561,17 +605,28 @@ def create_node( try: self.query( - query, (node_id, time.time() + ping_interval, ping_interval, public_key) + query, + ( + sint64_node_id, + time.time() + ping_interval, + ping_interval, + public_key, + ), ) except sqlite3.IntegrityError: log(ERROR, "Unexpected node registration failure.") return 0 - return node_id + + # Note: we need to return the uint64 value of the node_id + return uint64_node_id def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: """Delete a node.""" + # Convert the uint64 value to sint64 for SQLite + sint64_node_id = convert_uint64_to_sint64(node_id) + query = "DELETE FROM node WHERE node_id = ?" - params = (node_id,) + params = (sint64_node_id,) if public_key is not None: query += " AND public_key = ?" @@ -588,7 +643,7 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: except KeyError as exc: log(ERROR, {"query": query, "data": params, "exception": exc}) - def get_nodes(self, run_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> set[int]: """Retrieve all currently stored node IDs as a set. Constraints @@ -596,15 +651,20 @@ def get_nodes(self, run_id: int) -> Set[int]: If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
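The conversions added throughout `sqlite_state.py` exist because SQLite stores INTEGER values as signed 64-bit numbers, while node and run IDs are now sampled as unsigned 64-bit values. A self-contained round trip showing the problem and the fix; the helper bodies mirror the logic added in `utils.py`:

import sqlite3


def convert_uint64_to_sint64(u: int) -> int:
    # Reinterpret the top bit as a sign bit (two's complement).
    return u - (1 << 64) if u >= (1 << 63) else u


def convert_sint64_to_uint64(s: int) -> int:
    return s + (1 << 64) if s < 0 else s


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE node(node_id INTEGER UNIQUE)")

node_id = 2**64 - 1  # valid uint64, but outside SQLite's signed INTEGER range
conn.execute("INSERT INTO node VALUES (?)", (convert_uint64_to_sint64(node_id),))

(stored,) = conn.execute("SELECT node_id FROM node").fetchone()
assert stored == -1                                 # the bit-identical sint64 twin
assert convert_sint64_to_uint64(stored) == node_id  # callers still see the uint64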
""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) + # Validate run ID query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" - if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: + if self.query(query, (sint64_run_id,))[0]["COUNT(*)"] == 0: return set() # Get nodes query = "SELECT node_id FROM node WHERE online_until > ?;" rows = self.query(query, (time.time(),)) - result: Set[int] = {row["node_id"] for row in rows} + + # Convert sint64 node_ids to uint64 + result: set[int] = {convert_sint64_to_uint64(row["node_id"]) for row in rows} return result def get_node_id(self, node_public_key: bytes) -> Optional[int]: @@ -613,7 +673,11 @@ def get_node_id(self, node_public_key: bytes) -> Optional[int]: row = self.query(query, {"public_key": node_public_key}) if len(row) > 0: node_id: int = row[0]["node_id"] - return node_id + + # Convert the sint64 value to uint64 after reading from SQLite + uint64_node_id = convert_sint64_to_uint64(node_id) + + return uint64_node_id return None def create_run( @@ -625,12 +689,15 @@ def create_run( ) -> int: """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id - run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + uint64_run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(uint64_run_id) # Check conflicts query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" - # If run_id does not exist - if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: + # If sint64_run_id does not exist + if self.query(query, (sint64_run_id,))[0]["COUNT(*)"] == 0: query = ( "INSERT INTO run " "(run_id, fab_id, fab_version, fab_hash, override_config)" @@ -638,14 +705,22 @@ def create_run( ) if fab_hash: self.query( - query, (run_id, "", "", fab_hash, json.dumps(override_config)) + query, + (sint64_run_id, "", "", fab_hash, json.dumps(override_config)), ) else: self.query( query, - (run_id, fab_id, fab_version, "", json.dumps(override_config)), + ( + sint64_run_id, + fab_id, + fab_version, + "", + json.dumps(override_config), + ), ) - return run_id + # Note: we need to return the uint64 value of the run_id + return uint64_run_id log(ERROR, "Unexpected run creation failure.") return 0 @@ -684,7 +759,7 @@ def get_server_public_key(self) -> Optional[bytes]: public_key = None return public_key - def store_node_public_keys(self, public_keys: Set[bytes]) -> None: + def store_node_public_keys(self, public_keys: set[bytes]) -> None: """Store a set of `node_public_keys` in state.""" query = "INSERT INTO public_key (public_key) VALUES (?)" data = [(key,) for key in public_keys] @@ -695,35 +770,41 @@ def store_node_public_key(self, public_key: bytes) -> None: query = "INSERT INTO public_key (public_key) VALUES (:public_key)" self.query(query, {"public_key": public_key}) - def get_node_public_keys(self) -> Set[bytes]: + def get_node_public_keys(self) -> set[bytes]: """Retrieve all currently stored `node_public_keys` as a set.""" query = "SELECT public_key FROM public_key" rows = self.query(query) - result: Set[bytes] = {row["public_key"] for row in rows} + result: set[bytes] = {row["public_key"] for row in rows} return result def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) query = "SELECT * FROM run WHERE run_id = ?;" - try: - row = 
self.query(query, (run_id,))[0] + rows = self.query(query, (sint64_run_id,)) + if rows: + row = rows[0] return Run( - run_id=run_id, + run_id=convert_sint64_to_uint64(row["run_id"]), fab_id=row["fab_id"], fab_version=row["fab_version"], fab_hash=row["fab_hash"], override_config=json.loads(row["override_config"]), ) - except sqlite3.IntegrityError: - log(ERROR, "`run_id` does not exist.") - return None + log(ERROR, "`run_id` does not exist.") + return None def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: """Acknowledge a ping received from a node, serving as a heartbeat.""" + sint64_node_id = convert_uint64_to_sint64(node_id) + # Update `online_until` and `ping_interval` for the given `node_id` query = "UPDATE node SET online_until = ?, ping_interval = ? WHERE node_id = ?;" try: - self.query(query, (time.time() + ping_interval, ping_interval, node_id)) + self.query( + query, (time.time() + ping_interval, ping_interval, sint64_node_id) + ) return True except sqlite3.IntegrityError: log(ERROR, "`node_id` does not exist.") @@ -733,7 +814,7 @@ def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: def dict_factory( cursor: sqlite3.Cursor, row: sqlite3.Row, -) -> Dict[str, Any]: +) -> dict[str, Any]: """Turn SQLite results into dicts. Less efficent for retrival of large amounts of data but easier to use. @@ -742,7 +823,7 @@ def dict_factory( return dict(zip(fields, row)) -def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: +def task_ins_to_dict(task_msg: TaskIns) -> dict[str, Any]: """Transform TaskIns to dict.""" result = { "task_id": task_msg.task_id, @@ -763,7 +844,7 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: return result -def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: +def task_res_to_dict(task_msg: TaskRes) -> dict[str, Any]: """Transform TaskRes to dict.""" result = { "task_id": task_msg.task_id, @@ -784,7 +865,7 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: return result -def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: +def dict_to_task_ins(task_dict: dict[str, Any]) -> TaskIns: """Turn task_dict into protobuf message.""" recordset = RecordSet() recordset.ParseFromString(task_dict["recordset"]) @@ -814,7 +895,7 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: return result -def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: +def dict_to_task_res(task_dict: dict[str, Any]) -> TaskRes: """Turn task_dict into protobuf message.""" recordset = RecordSet() recordset.ParseFromString(task_dict["recordset"]) diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/state/state.py index fea53105b23f..39da052fb0aa 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/state/state.py @@ -16,7 +16,7 @@ import abc -from typing import List, Optional, Set +from typing import Optional from uuid import UUID from flwr.common.typing import Run, UserConfig @@ -51,7 +51,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: @abc.abstractmethod def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get TaskIns optionally filtered by node_id. 
Usually, the Fleet API calls this for Nodes planning to work on one or more @@ -98,7 +98,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: """ @abc.abstractmethod - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + def get_task_res(self, task_ids: set[UUID], limit: Optional[int]) -> list[TaskRes]: """Get TaskRes for task_ids. Usually, the Driver API calls this method to get results for instructions it has @@ -129,7 +129,7 @@ def num_task_res(self) -> int: """ @abc.abstractmethod - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" @abc.abstractmethod @@ -143,7 +143,7 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: """Remove `node_id` from state.""" @abc.abstractmethod - def get_nodes(self, run_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> set[int]: """Retrieve all currently stored node IDs as a set. Constraints @@ -199,7 +199,7 @@ def get_server_public_key(self) -> Optional[bytes]: """Retrieve `server_public_key` in urlsafe bytes.""" @abc.abstractmethod - def store_node_public_keys(self, public_keys: Set[bytes]) -> None: + def store_node_public_keys(self, public_keys: set[bytes]) -> None: """Store a set of `node_public_keys` in state.""" @abc.abstractmethod @@ -207,7 +207,7 @@ def store_node_public_key(self, public_key: bytes) -> None: """Store a `node_public_key` in state.""" @abc.abstractmethod - def get_node_public_keys(self) -> Set[bytes]: + def get_node_public_keys(self) -> set[bytes]: """Retrieve all currently stored `node_public_keys` as a set.""" @abc.abstractmethod diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index 0cf30a42ca2c..42c0768f1c7d 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -20,7 +20,6 @@ import unittest from abc import abstractmethod from datetime import datetime, timezone -from typing import List from unittest.mock import patch from uuid import uuid4 @@ -655,7 +654,7 @@ def test_node_unavailable_error(self) -> None: # Execute current_time = time.time() - task_res_list: List[TaskRes] = [] + task_res_list: list[TaskRes] = [] with patch("time.time", side_effect=lambda: current_time + 50): task_res_list = state.get_task_res({task_id_0, task_id_1}, limit=None) @@ -698,7 +697,7 @@ def create_task_ins( def create_task_res( producer_node_id: int, anonymous: bool, - ancestry: List[str], + ancestry: list[str], run_id: int, ) -> TaskRes: """Create a TaskRes for testing.""" diff --git a/src/py/flwr/server/superlink/state/utils.py b/src/py/flwr/server/superlink/state/utils.py index b12a87ac998d..00ba02d2e43b 100644 --- a/src/py/flwr/server/superlink/state/utils.py +++ b/src/py/flwr/server/superlink/state/utils.py @@ -33,8 +33,104 @@ def generate_rand_int_from_bytes(num_bytes: int) -> int: - """Generate a random `num_bytes` integer.""" - return int.from_bytes(urandom(num_bytes), "little", signed=True) + """Generate a random unsigned integer from `num_bytes` bytes.""" + return int.from_bytes(urandom(num_bytes), "little", signed=False) + + +def convert_uint64_to_sint64(u: int) -> int: + """Convert a uint64 value to a sint64 value with the same bit sequence. + + Parameters + ---------- + u : int + The unsigned 64-bit integer to convert. + + Returns + ------- + int + The signed 64-bit integer equivalent. 
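With `signed=False`, the `generate_rand_int_from_bytes` change above now yields only non-negative IDs, whereas the same random bytes could previously decode to a negative number. A quick sketch of the difference using fixed bytes:

from os import urandom


def rand_uint(num_bytes: int) -> int:
    # signed=False guarantees a value in [0, 2**(8 * num_bytes)).
    return int.from_bytes(urandom(num_bytes), "little", signed=False)


raw = bytes([0xFF] * 8)  # all-ones, to make the contrast visible
assert int.from_bytes(raw, "little", signed=True) == -1
assert int.from_bytes(raw, "little", signed=False) == 2**64 - 1
assert rand_uint(8) >= 0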
+ + The signed 64-bit integer will have the same bit pattern as the + unsigned 64-bit integer but may have a different decimal value. + + For numbers within the range [0, `sint64` max value], the decimal + value remains the same. However, for numbers greater than the `sint64` + max value, the decimal value will differ due to the wraparound caused + by the sign bit. + """ + if u >= (1 << 63): + return u - (1 << 64) + return u + + +def convert_sint64_to_uint64(s: int) -> int: + """Convert a sint64 value to a uint64 value with the same bit sequence. + + Parameters + ---------- + s : int + The signed 64-bit integer to convert. + + Returns + ------- + int + The unsigned 64-bit integer equivalent. + + The unsigned 64-bit integer will have the same bit pattern as the + signed 64-bit integer but may have a different decimal value. + + For negative `sint64` values, the conversion adds 2^64 to the + signed value to obtain the equivalent `uint64` value. For non-negative + `sint64` values, the decimal value remains unchanged in the `uint64` + representation. + """ + if s < 0: + return s + (1 << 64) + return s + + +def convert_uint64_values_in_dict_to_sint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert uint64 values to sint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. + keys : list[str] + A list of keys in the dictionary whose values need to be converted. + + Returns + ------- + None + This function does not return a value. It modifies `data_dict` in place. + """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_uint64_to_sint64(data_dict[key]) + + +def convert_sint64_values_in_dict_to_uint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert sint64 values to uint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. + keys : list[str] + A list of keys in the dictionary whose values need to be converted. + + Returns + ------- + None + This function does not return a value. It modifies `data_dict` in place. + """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_sint64_to_uint64(data_dict[key]) def make_node_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: diff --git a/src/py/flwr/server/superlink/state/utils_test.py b/src/py/flwr/server/superlink/state/utils_test.py new file mode 100644 index 000000000000..d55e2ffd9aa3 --- /dev/null +++ b/src/py/flwr/server/superlink/state/utils_test.py @@ -0,0 +1,150 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utils tests.""" + +import unittest + +from parameterized import parameterized + +from .utils import ( + convert_sint64_to_uint64, + convert_sint64_values_in_dict_to_uint64, + convert_uint64_to_sint64, + convert_uint64_values_in_dict_to_sint64, + generate_rand_int_from_bytes, +) + + +class UtilsTest(unittest.TestCase): + """Test utils code.""" + + @parameterized.expand( # type: ignore + [ + # Test values within the positive range of sint64 (below 2^63) + (0, 0), # Minimum positive value + (1, 1), # 1 remains 1 in both uint64 and sint64 + (2**62, 2**62), # Mid-range positive value + (2**63 - 1, 2**63 - 1), # Maximum positive value for sint64 + # Test values at or above 2^63 (become negative in sint64) + (2**63, -(2**63)), # Minimum negative value for sint64 + (2**63 + 1, -(2**63) + 1), # Slightly above the boundary + (9223372036854775811, -9223372036854775805), # Some value > sint64 max + (2**64 - 1, -1), # Maximum uint64 value becomes -1 in sint64 + ] + ) + def test_convert_uint64_to_sint64(self, before: int, after: int) -> None: + """Test conversion from uint64 to sint64.""" + self.assertEqual(convert_uint64_to_sint64(before), after) + + @parameterized.expand( # type: ignore + [ + # Test values within the negative range of sint64 + (-(2**63), 2**63), # Minimum sint64 value becomes 2^63 in uint64 + (-(2**63) + 1, 2**63 + 1), # Slightly above the minimum + (-9223372036854775805, 9223372036854775811), # Some value > sint64 max + # Test zero-adjacent inputs + (-1, 2**64 - 1), # -1 in sint64 becomes 2^64 - 1 in uint64 + (0, 0), # 0 remains 0 in both sint64 and uint64 + (1, 1), # 1 remains 1 in both sint64 and uint64 + # Test values within the positive range of sint64 + (2**63 - 1, 2**63 - 1), # Maximum positive value in sint64 + # Test boundary and maximum uint64 value + (2**63, 2**63), # Exact boundary value for sint64 + (2**64 - 1, 2**64 - 1), # Maximum uint64 value, stays the same + ] + ) + def test_sint64_to_uint64(self, before: int, after: int) -> None: + """Test conversion from sint64 to uint64.""" + self.assertEqual(convert_sint64_to_uint64(before), after) + + @parameterized.expand( # type: ignore + [ + (0), + (1), + (2**62), + (2**63 - 1), + (2**63), + (2**63 + 1), + (9223372036854775811), + (2**64 - 1), + ] + ) + def test_uint64_to_sint64_to_uint64(self, expected: int) -> None: + """Test conversion from sint64 to uint64.""" + actual = convert_sint64_to_uint64(convert_uint64_to_sint64(expected)) + self.assertEqual(expected, actual) + + @parameterized.expand( # type: ignore + [ + # Test cases with uint64 values + ( + {"a": 0, "b": 2**63 - 1, "c": 2**63, "d": 2**64 - 1}, + ["a", "b", "c", "d"], + {"a": 0, "b": 2**63 - 1, "c": -(2**63), "d": -1}, + ), + ( + {"a": 1, "b": 2**62, "c": 2**63 + 1}, + ["a", "b", "c"], + {"a": 1, "b": 2**62, "c": -(2**63) + 1}, + ), + # Edge cases with mixed uint64 values and keys + ( + {"a": 2**64 - 1, "b": 12345, "c": 0}, + ["a", "b"], + {"a": -1, "b": 12345, "c": 0}, + ), + ] + ) + def test_convert_uint64_values_in_dict_to_sint64( + self, input_dict: dict[str, int], keys: list[str], expected_dict: dict[str, int] + ) -> None: + """Test uint64 to sint64 conversion in a dictionary.""" + convert_uint64_values_in_dict_to_sint64(input_dict, keys) + self.assertEqual(input_dict, expected_dict) + + @parameterized.expand( # type: ignore + [ + # Test cases with sint64 values + ( + {"a": 0, "b": 2**63 - 1, "c": -(2**63), "d": -1}, + ["a", "b", "c", "d"], + {"a": 0, "b": 2**63 - 1, "c": 
2**63, "d": 2**64 - 1}, + ), + ( + {"a": -1, "b": -(2**63) + 1, "c": 12345}, + ["a", "b", "c"], + {"a": 2**64 - 1, "b": 2**63 + 1, "c": 12345}, + ), + # Edge cases with mixed sint64 values and keys + ( + {"a": -1, "b": 12345, "c": 0}, + ["a", "b"], + {"a": 2**64 - 1, "b": 12345, "c": 0}, + ), + ] + ) + def test_convert_sint64_values_in_dict_to_uint64( + self, input_dict: dict[str, int], keys: list[str], expected_dict: dict[str, int] + ) -> None: + """Test sint64 to uint64 conversion in a dictionary.""" + convert_sint64_values_in_dict_to_uint64(input_dict, keys) + self.assertEqual(input_dict, expected_dict) + + def test_generate_rand_int_from_bytes_unsigned_int(self) -> None: + """Test that the generated integer is unsigned (non-negative).""" + for num_bytes in range(1, 9): + with self.subTest(num_bytes=num_bytes): + rand_int = generate_rand_int_from_bytes(num_bytes) + self.assertGreaterEqual(rand_int, 0) diff --git a/src/py/flwr/server/utils/tensorboard.py b/src/py/flwr/server/utils/tensorboard.py index 5d38fc159657..281e8949c53c 100644 --- a/src/py/flwr/server/utils/tensorboard.py +++ b/src/py/flwr/server/utils/tensorboard.py @@ -18,7 +18,7 @@ import os from datetime import datetime from logging import WARN -from typing import Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast from flwr.common import EvaluateRes, Scalar from flwr.common.logger import log @@ -92,9 +92,9 @@ class TBWrapper(strategy_class): # type: ignore def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Hooks into aggregate_evaluate for TensorBoard logging purpose.""" # Execute decorated function and extract results for logging # They will be returned at the end of this function but also diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index c0b0ec85761c..fb3d0425db86 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -15,13 +15,13 @@ """Validators.""" -from typing import List, Union +from typing import Union from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 # pylint: disable-next=too-many-branches,too-many-statements -def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str]: +def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> list[str]: """Validate a TaskIns or TaskRes.""" validation_errors = [] diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 61fe094c23d4..20162883efea 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -17,7 +17,6 @@ import time import unittest -from typing import List, Tuple from flwr.common import DEFAULT_TTL from flwr.proto.node_pb2 import Node # pylint: disable=E0611 @@ -52,12 +51,12 @@ def test_is_valid_task_res(self) -> None: """Test is_valid task_res.""" # Prepare # (producer_node_id, anonymous, ancestry) - valid_res: List[Tuple[int, bool, List[str]]] = [ + valid_res: list[tuple[int, bool, list[str]]] = [ (0, True, ["1"]), (1, False, ["1"]), ] - invalid_res: List[Tuple[int, bool, List[str]]] = [ + invalid_res: list[tuple[int, bool, 
list[str]]] = [ (0, False, []), (0, False, ["1"]), (0, True, []), @@ -110,7 +109,7 @@ def create_task_ins( def create_task_res( producer_node_id: int, anonymous: bool, - ancestry: List[str], + ancestry: list[str], ) -> TaskRes: """Create a TaskRes for testing.""" task_res = TaskRes( diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index 82d8d5d4ccb6..484a747292d5 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -18,7 +18,7 @@ import io import timeit from logging import INFO, WARN -from typing import List, Optional, Tuple, Union, cast +from typing import Optional, Union, cast import flwr.common.recordset_compat as compat from flwr.common import ( @@ -276,8 +276,8 @@ def default_fit_workflow( # pylint: disable=R0914 ) # Aggregate training results - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] for msg in messages: if msg.has_content(): proxy = node_id_to_proxy[msg.metadata.src_node_id] @@ -362,8 +362,8 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: ) # Aggregate the evaluation results - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] for msg in messages: if msg.has_content(): proxy = node_id_to_proxy[msg.metadata.src_node_id] diff --git a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py index 322e32ed5019..d84a5496dfe1 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py @@ -18,7 +18,7 @@ import random from dataclasses import dataclass, field from logging import DEBUG, ERROR, INFO, WARN -from typing import Dict, List, Optional, Set, Tuple, Union, cast +from typing import Optional, Union, cast import flwr.common.recordset_compat as compat from flwr.common import ( @@ -65,23 +65,23 @@ class WorkflowState: # pylint: disable=R0902 """The state of the SecAgg+ protocol.""" - nid_to_proxies: Dict[int, ClientProxy] = field(default_factory=dict) - nid_to_fitins: Dict[int, RecordSet] = field(default_factory=dict) - sampled_node_ids: Set[int] = field(default_factory=set) - active_node_ids: Set[int] = field(default_factory=set) + nid_to_proxies: dict[int, ClientProxy] = field(default_factory=dict) + nid_to_fitins: dict[int, RecordSet] = field(default_factory=dict) + sampled_node_ids: set[int] = field(default_factory=set) + active_node_ids: set[int] = field(default_factory=set) num_shares: int = 0 threshold: int = 0 clipping_range: float = 0.0 quantization_range: int = 0 mod_range: int = 0 max_weight: float = 0.0 - nid_to_neighbours: Dict[int, Set[int]] = field(default_factory=dict) - nid_to_publickeys: Dict[int, List[bytes]] = field(default_factory=dict) - forward_srcs: Dict[int, List[int]] = field(default_factory=dict) - forward_ciphertexts: Dict[int, List[bytes]] = field(default_factory=dict) + nid_to_neighbours: dict[int, set[int]] = field(default_factory=dict) + nid_to_publickeys: dict[int, list[bytes]] = field(default_factory=dict) + 
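The `default_workflows.py` hunk above collects replies into `results` and `failures` depending on whether a message carries content or an error. A toy version of that split; `Msg` is a stand-in for Flower's `Message`, and the string payloads replace the real `(ClientProxy, FitRes)` pairs:

from dataclasses import dataclass
from typing import Optional, Union


@dataclass
class Msg:
    """Stand-in message: either a payload or an error is set."""

    payload: Optional[str] = None
    error: Optional[str] = None

    def has_content(self) -> bool:
        return self.payload is not None


messages = [Msg(payload="fit-res-1"), Msg(error="client crashed"), Msg(payload="fit-res-2")]

results: list[str] = []
failures: list[Union[str, BaseException]] = []
for msg in messages:
    if msg.has_content():
        results.append(msg.payload or "")
    else:
        failures.append(Exception(str(msg.error)))

assert len(results) == 2 and len(failures) == 1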
forward_srcs: dict[int, list[int]] = field(default_factory=dict) + forward_ciphertexts: dict[int, list[bytes]] = field(default_factory=dict) aggregate_ndarrays: NDArrays = field(default_factory=list) - legacy_results: List[Tuple[ClientProxy, FitRes]] = field(default_factory=list) - failures: List[Exception] = field(default_factory=list) + legacy_results: list[tuple[ClientProxy, FitRes]] = field(default_factory=list) + failures: list[Exception] = field(default_factory=list) class SecAggPlusWorkflow: @@ -444,13 +444,13 @@ def make(nid: int) -> Message: ) # Build forward packet list dictionary - srcs: List[int] = [] - dsts: List[int] = [] - ciphertexts: List[bytes] = [] - fwd_ciphertexts: Dict[int, List[bytes]] = { + srcs: list[int] = [] + dsts: list[int] = [] + ciphertexts: list[bytes] = [] + fwd_ciphertexts: dict[int, list[bytes]] = { nid: [] for nid in state.active_node_ids } # dest node ID -> list of ciphertexts - fwd_srcs: Dict[int, List[int]] = { + fwd_srcs: dict[int, list[int]] = { nid: [] for nid in state.active_node_ids } # dest node ID -> list of src node IDs for msg in msgs: @@ -459,8 +459,8 @@ def make(nid: int) -> Message: continue node_id = msg.metadata.src_node_id res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - dst_lst = cast(List[int], res_dict[Key.DESTINATION_LIST]) - ctxt_lst = cast(List[bytes], res_dict[Key.CIPHERTEXT_LIST]) + dst_lst = cast(list[int], res_dict[Key.DESTINATION_LIST]) + ctxt_lst = cast(list[bytes], res_dict[Key.CIPHERTEXT_LIST]) srcs += [node_id] * len(dst_lst) dsts += dst_lst ciphertexts += ctxt_lst @@ -525,7 +525,7 @@ def make(nid: int) -> Message: state.failures.append(Exception(msg.error)) continue res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - bytes_list = cast(List[bytes], res_dict[Key.MASKED_PARAMETERS]) + bytes_list = cast(list[bytes], res_dict[Key.MASKED_PARAMETERS]) client_masked_vec = [bytes_to_ndarray(b) for b in bytes_list] if masked_vector is None: masked_vector = client_masked_vec @@ -592,7 +592,7 @@ def make(nid: int) -> Message: ) # Build collected shares dict - collected_shares_dict: Dict[int, List[bytes]] = {} + collected_shares_dict: dict[int, list[bytes]] = {} for nid in state.sampled_node_ids: collected_shares_dict[nid] = [] for msg in msgs: @@ -600,8 +600,8 @@ def make(nid: int) -> Message: state.failures.append(Exception(msg.error)) continue res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - nids = cast(List[int], res_dict[Key.NODE_ID_LIST]) - shares = cast(List[bytes], res_dict[Key.SHARE_LIST]) + nids = cast(list[int], res_dict[Key.NODE_ID_LIST]) + shares = cast(list[bytes], res_dict[Key.SHARE_LIST]) for owner_nid, share in zip(nids, shares): collected_shares_dict[owner_nid].append(share) diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index 973a9a89e652..0070d75c53dc 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -22,7 +22,7 @@ import traceback import warnings from logging import ERROR, INFO -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Optional, Union import ray from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy @@ -72,7 +72,7 @@ """ -NodeToPartitionMapping = Dict[int, int] +NodeToPartitionMapping = dict[int, int] def _create_node_id_to_partition_mapping( @@ -94,16 +94,16 @@ def start_simulation( *, client_fn: ClientFnExt, num_clients: int, - clients_ids: Optional[List[str]] = None, # UNSUPPORTED, WILL BE REMOVED - client_resources: Optional[Dict[str, float]] = None, + 
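The SecAgg+ hunk above regroups per-sender destination and ciphertext lists into per-destination forwarding lists. The same regrouping with plain dicts; node IDs and ciphertext bytes here are invented for illustration:

# Each source node reports the destinations and the ciphertexts addressed to them.
responses = {
    101: {"destinations": [201, 202], "ciphertexts": [b"c1", b"c2"]},
    102: {"destinations": [201], "ciphertexts": [b"c3"]},
}
active_node_ids = {201, 202}

# Regroup into destination -> list of ciphertexts / list of source IDs.
fwd_ciphertexts: dict[int, list[bytes]] = {nid: [] for nid in active_node_ids}
fwd_srcs: dict[int, list[int]] = {nid: [] for nid in active_node_ids}
for src, res in responses.items():
    for dst, ctxt in zip(res["destinations"], res["ciphertexts"]):
        if dst in fwd_ciphertexts:  # ignore nodes that dropped out
            fwd_ciphertexts[dst].append(ctxt)
            fwd_srcs[dst].append(src)

assert fwd_ciphertexts[201] == [b"c1", b"c3"]
assert fwd_srcs[202] == [101]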
clients_ids: Optional[list[str]] = None, # UNSUPPORTED, WILL BE REMOVED + client_resources: Optional[dict[str, float]] = None, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, - ray_init_args: Optional[Dict[str, Any]] = None, + ray_init_args: Optional[dict[str, Any]] = None, keep_initialised: Optional[bool] = False, - actor_type: Type[VirtualClientEngineActor] = ClientAppActor, - actor_kwargs: Optional[Dict[str, Any]] = None, + actor_type: type[VirtualClientEngineActor] = ClientAppActor, + actor_kwargs: Optional[dict[str, Any]] = None, actor_scheduling: Union[str, NodeAffinitySchedulingStrategy] = "DEFAULT", ) -> History: """Start a Ray-based Flower simulation server. @@ -279,7 +279,7 @@ def start_simulation( # An actor factory. This is called N times to add N actors # to the pool. If at some point the pool can accommodate more actors # this will be called again. - def create_actor_fn() -> Type[VirtualClientEngineActor]: + def create_actor_fn() -> type[VirtualClientEngineActor]: return actor_type.options( # type: ignore **client_resources, scheduling_strategy=actor_scheduling, diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index b1c9d2b9c0c1..4fb48a99b689 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -17,7 +17,7 @@ import threading from abc import ABC from logging import DEBUG, ERROR, WARNING -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import Any, Callable, Optional, Union import ray from ray import ObjectRef @@ -44,7 +44,7 @@ def run( message: Message, cid: str, context: Context, - ) -> Tuple[str, Message, Context]: + ) -> tuple[str, Message, Context]: """Run a client run.""" # Pass message through ClientApp and return a message # return also cid which is needed to ensure results @@ -81,7 +81,7 @@ def __init__(self, on_actor_init_fn: Optional[Callable[[], None]] = None) -> Non on_actor_init_fn() -def pool_size_from_resources(client_resources: Dict[str, Union[int, float]]) -> int: +def pool_size_from_resources(client_resources: dict[str, Union[int, float]]) -> int: """Calculate number of Actors that fit in the cluster. For this we consider the resources available on each node and those required per @@ -124,14 +124,14 @@ def pool_size_from_resources(client_resources: Dict[str, Union[int, float]]) -> WARNING, "The ActorPool is empty. The system (CPUs=%s, GPUs=%s) " "does not meet the criteria to host at least one client with resources:" - " %s. Lowering the `client_resources` could help.", + " %s. Lowering these resources could help.", num_cpus, num_gpus, client_resources, ) raise ValueError( "ActorPool is empty. Stopping Simulation. 
" - "Check 'client_resources' passed to `start_simulation`" + "Check `num_cpus` and/or `num_gpus` passed to the simulation engine" ) return total_num_actors @@ -162,9 +162,9 @@ class VirtualClientEngineActorPool(ActorPool): def __init__( self, - create_actor_fn: Callable[[], Type[VirtualClientEngineActor]], - client_resources: Dict[str, Union[int, float]], - actor_list: Optional[List[Type[VirtualClientEngineActor]]] = None, + create_actor_fn: Callable[[], type[VirtualClientEngineActor]], + client_resources: dict[str, Union[int, float]], + actor_list: Optional[list[type[VirtualClientEngineActor]]] = None, ): self.client_resources = client_resources self.create_actor_fn = create_actor_fn @@ -183,10 +183,10 @@ def __init__( # A dict that maps cid to another dict containing: a reference to the remote job # and its status (i.e. whether it is ready or not) - self._cid_to_future: Dict[ - str, Dict[str, Union[bool, Optional[ObjectRef[Any]]]] + self._cid_to_future: dict[ + str, dict[str, Union[bool, Optional[ObjectRef[Any]]]] ] = {} - self.actor_to_remove: Set[str] = set() # a set + self.actor_to_remove: set[str] = set() # a set self.num_actors = len(actors) self.lock = threading.RLock() @@ -210,7 +210,7 @@ def add_actors_to_pool(self, num_actors: int) -> None: self._idle_actors.extend(new_actors) self.num_actors += num_actors - def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> None: + def submit(self, fn: Any, value: tuple[ClientAppFn, Message, str, Context]) -> None: """Take an idle actor and assign it to run a client app and Message. Submit a job to an actor by first removing it from the list of idle actors, then @@ -220,7 +220,7 @@ def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> N actor = self._idle_actors.pop() if self._check_and_remove_actor_from_pool(actor): future = fn(actor, app_fn, mssg, cid, context) - future_key = tuple(future) if isinstance(future, List) else future + future_key = tuple(future) if isinstance(future, list) else future self._future_to_actor[future_key] = (self._next_task_index, actor, cid) self._next_task_index += 1 @@ -228,7 +228,7 @@ def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> N self._cid_to_future[cid]["future"] = future_key def submit_client_job( - self, actor_fn: Any, job: Tuple[ClientAppFn, Message, str, Context] + self, actor_fn: Any, job: tuple[ClientAppFn, Message, str, Context] ) -> None: """Submit a job while tracking client ids.""" _, _, cid, _ = job @@ -268,7 +268,7 @@ def _is_future_ready(self, cid: str) -> bool: return self._cid_to_future[cid]["ready"] # type: ignore - def _fetch_future_result(self, cid: str) -> Tuple[Message, Context]: + def _fetch_future_result(self, cid: str) -> tuple[Message, Context]: """Fetch result and updated context for a VirtualClient from Object Store. The job submitted by the ClientProxy interfacing with client with cid=cid is @@ -382,7 +382,7 @@ def process_unordered_future(self, timeout: Optional[float] = None) -> None: def get_client_result( self, cid: str, timeout: Optional[float] - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Get result from VirtualClient with specific cid.""" # Loop until all jobs submitted to the pool are completed. 
Break early # if the result for the ClientProxy calling this method is ready @@ -403,14 +403,14 @@ class BasicActorPool: def __init__( self, - actor_type: Type[VirtualClientEngineActor], - client_resources: Dict[str, Union[int, float]], - actor_kwargs: Dict[str, Any], + actor_type: type[VirtualClientEngineActor], + client_resources: dict[str, Union[int, float]], + actor_kwargs: dict[str, Any], ): self.client_resources = client_resources # Queue of idle actors - self.pool: List[VirtualClientEngineActor] = [] + self.pool: list[VirtualClientEngineActor] = [] self.num_actors = 0 # Resolve arguments to pass during actor init @@ -424,7 +424,7 @@ def __init__( # Figure out how many actors can be created given the cluster resources # and the resources the user indicates each VirtualClient will need self.actors_capacity = pool_size_from_resources(client_resources) - self._future_to_actor: Dict[Any, VirtualClientEngineActor] = {} + self._future_to_actor: dict[Any, VirtualClientEngineActor] = {} def is_actor_available(self) -> bool: """Return true if there is an idle actor.""" @@ -450,7 +450,7 @@ def terminate_all_actors(self) -> None: log(DEBUG, "Terminated %i actors", num_terminated) def submit( - self, actor_fn: Any, job: Tuple[ClientAppFn, Message, str, Context] + self, actor_fn: Any, job: tuple[ClientAppFn, Message, str, Context] ) -> Any: """On idle actor, submit job and return future.""" # Remove idle actor from pool @@ -470,7 +470,7 @@ def add_actor_back_to_pool(self, future: Any) -> None: def fetch_result_and_return_actor_to_pool( self, future: Any - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Pull result given a future and add actor back to pool.""" # Retrieve result for object store # Instead of doing ray.get(future) we await it diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index 1c2aa455d9cd..ce0ef46d135f 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -17,7 +17,6 @@ from math import pi from random import shuffle -from typing import Dict, List, Tuple, Type import ray @@ -60,7 +59,7 @@ def __init__(self, node_id: int, state: RecordSet) -> None: self.node_id = node_id self.client_state = state - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple calculation.""" result = self.node_id * pi # store something in context @@ -76,14 +75,14 @@ def get_dummy_client(context: Context) -> Client: def prep( - actor_type: Type[VirtualClientEngineActor] = ClientAppActor, -) -> Tuple[ - List[RayActorClientProxy], VirtualClientEngineActorPool, NodeToPartitionMapping + actor_type: type[VirtualClientEngineActor] = ClientAppActor, +) -> tuple[ + list[RayActorClientProxy], VirtualClientEngineActorPool, NodeToPartitionMapping ]: # pragma: no cover """Prepare ClientProxies and pool for tests.""" client_resources = {"num_cpus": 1, "num_gpus": 0.0} - def create_actor_fn() -> Type[VirtualClientEngineActor]: + def create_actor_fn() -> type[VirtualClientEngineActor]: return actor_type.options(**client_resources).remote() # type: ignore # Create actor pool @@ -195,7 +194,7 @@ def test_cid_consistency_without_proxies() -> None: node_ids = list(mapping.keys()) # register node states - node_states: Dict[int, NodeState] = {} + node_states: dict[int, NodeState] = {} for node_id, partition_id in 
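`pool_size_from_resources` and `BasicActorPool.actors_capacity` in the hunks above size the actor pool from cluster resources and per-client requirements. A simplified single-node estimate (the real function iterates over every Ray node); the function name and arguments here are illustrative:

from typing import Union


def single_node_capacity(
    node_cpus: float,
    node_gpus: float,
    client_resources: dict[str, Union[int, float]],
) -> int:
    """Estimate how many client actors fit on one node."""
    num_cpus = client_resources.get("num_cpus", 1)
    num_gpus = client_resources.get("num_gpus", 0)

    by_cpu = int(node_cpus / num_cpus) if num_cpus else 0
    # Only constrain by GPU when a GPU share is actually requested.
    by_gpu = int(node_gpus / num_gpus) if num_gpus else by_cpu

    capacity = min(by_cpu, by_gpu)
    if capacity == 0:
        raise ValueError("Node cannot host a single client with these resources.")
    return capacity


assert single_node_capacity(8, 1, {"num_cpus": 2, "num_gpus": 0.25}) == 4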
mapping.items(): node_states[node_id] = NodeState( node_id=node_id, diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py index af12da4a5814..2d29629c4f01 100644 --- a/src/py/flwr/simulation/run_simulation.py +++ b/src/py/flwr/simulation/run_simulation.py @@ -25,7 +25,7 @@ from logging import DEBUG, ERROR, INFO, WARNING from pathlib import Path from time import sleep -from typing import Any, List, Optional +from typing import Any, Optional from flwr.cli.config_utils import load_and_validate from flwr.client import ClientApp @@ -56,7 +56,7 @@ def _check_args_do_not_interfere(args: Namespace) -> bool: mode_one_args = ["app", "run_config"] mode_two_args = ["client_app", "server_app"] - def _resolve_message(conflict_keys: List[str]) -> str: + def _resolve_message(conflict_keys: list[str]) -> str: return ",".join([f"`--{key}`".replace("_", "-") for key in conflict_keys]) # When passing `--app`, `--app-dir` is ignored @@ -109,6 +109,11 @@ def run_simulation_from_cli() -> None: """Run Simulation Engine from the CLI.""" args = _parse_args_run_simulation().parse_args() + event( + EventType.CLI_FLOWER_SIMULATION_ENTER, + event_details={"backend": args.backend, "num-supernodes": args.num_supernodes}, + ) + # Add warnings for deprecated server_app and client_app arguments if args.server_app: warn_deprecated_feature( @@ -177,7 +182,9 @@ def run_simulation_from_cli() -> None: client_app_attr = app_components["clientapp"] server_app_attr = app_components["serverapp"] - override_config = parse_config_args([args.run_config]) + override_config = parse_config_args( + [args.run_config] if args.run_config else args.run_config + ) fused_config = get_fused_config_from_dir(app_path, override_config) app_dir = args.app is_app = True @@ -209,9 +216,11 @@ def run_simulation_from_cli() -> None: app_dir=app_dir, run=run, enable_tf_gpu_growth=args.enable_tf_gpu_growth, + delay_start=args.delay_start, verbose_logging=args.verbose, server_app_run_config=fused_config, is_app=is_app, + exit_event=EventType.CLI_FLOWER_SIMULATION_LEAVE, ) @@ -265,6 +274,11 @@ def run_simulation( When disabled, only INFO, WARNING and ERROR log messages will be shown. If enabled, DEBUG-level logs will be displayed. 
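The CLI entry point above now records an enter event, and the `_main_loop` changes further down emit a matching leave event whose `success` flag is finalized in a `finally` block. A generic sketch of that enter/leave pattern; `record_event` is a stand-in, not a Flower API:

from typing import Any


def record_event(name: str, details: dict[str, Any]) -> None:
    # Stand-in for the real telemetry call.
    print(f"event={name} details={details}")


def run(fail: bool = False) -> None:
    record_event("simulation-enter", {"backend": "ray"})
    success = True
    try:
        if fail:
            raise RuntimeError("simulated failure")
    except RuntimeError:
        success = False
        raise
    finally:
        # Emitted on both the success and the failure path.
        record_event("simulation-leave", {"success": success})


run()  # logs success=True
try:
    run(fail=True)  # logs success=False, then re-raises
except RuntimeError:
    pass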
""" + event( + EventType.PYTHON_API_RUN_SIMULATION_ENTER, + event_details={"backend": backend_name, "num-supernodes": num_supernodes}, + ) + if enable_tf_gpu_growth: warn_deprecated_feature_with_example( "Passing `enable_tf_gpu_growth=True` is deprecated.", @@ -282,6 +296,7 @@ def run_simulation( backend_config=backend_config, enable_tf_gpu_growth=enable_tf_gpu_growth, verbose_logging=verbose_logging, + exit_event=EventType.PYTHON_API_RUN_SIMULATION_LEAVE, ) @@ -295,7 +310,6 @@ def run_serverapp_th( f_stop: threading.Event, has_exception: threading.Event, enable_tf_gpu_growth: bool, - delay_launch: int = 3, ) -> threading.Thread: """Run SeverApp in a thread.""" @@ -351,7 +365,6 @@ def server_th_with_start_checks( server_app, ), ) - sleep(delay_launch) serverapp_th.start() return serverapp_th @@ -365,6 +378,8 @@ def _main_loop( is_app: bool, enable_tf_gpu_growth: bool, run: Run, + exit_event: EventType, + delay_start: int, flwr_dir: Optional[str] = None, client_app: Optional[ClientApp] = None, client_app_attr: Optional[str] = None, @@ -372,7 +387,7 @@ def _main_loop( server_app_attr: Optional[str] = None, server_app_run_config: Optional[UserConfig] = None, ) -> None: - """Launch SuperLink with Simulation Engine, then ServerApp on a separate thread.""" + """Start ServerApp on a separate thread, then launch Simulation Engine.""" # Initialize StateFactory state_factory = StateFactory(":flwr-in-memory-state:") @@ -380,6 +395,7 @@ def _main_loop( # A Threading event to indicate if an exception was raised in the ServerApp thread server_app_thread_has_exception = threading.Event() serverapp_th = None + success = True try: # Register run log(DEBUG, "Pre-registering run with id %s", run.run_id) @@ -403,8 +419,10 @@ def _main_loop( enable_tf_gpu_growth=enable_tf_gpu_growth, ) - # SuperLink with Simulation Engine - event(EventType.RUN_SUPERLINK_ENTER) + # Buffer time so the `ServerApp` in separate thread is ready + log(DEBUG, "Buffer time delay: %ds", delay_start) + sleep(delay_start) + # Start Simulation Engine vce.start_vce( num_supernodes=num_supernodes, client_app_attr=client_app_attr, @@ -422,13 +440,13 @@ def _main_loop( except Exception as ex: log(ERROR, "An exception occurred !! %s", ex) log(ERROR, traceback.format_exc()) + success = False raise RuntimeError("An error was encountered. 
Ending simulation.") from ex finally: # Trigger stop event f_stop.set() - - event(EventType.RUN_SUPERLINK_LEAVE) + event(exit_event, event_details={"success": success}) if serverapp_th: serverapp_th.join() if server_app_thread_has_exception.is_set(): @@ -440,6 +458,7 @@ def _main_loop( # pylint: disable=too-many-arguments,too-many-locals def _run_simulation( num_supernodes: int, + exit_event: EventType, client_app: Optional[ClientApp] = None, server_app: Optional[ServerApp] = None, backend_name: str = "ray", @@ -451,6 +470,7 @@ def _run_simulation( flwr_dir: Optional[str] = None, run: Optional[Run] = None, enable_tf_gpu_growth: bool = False, + delay_start: int = 5, verbose_logging: bool = False, is_app: bool = False, ) -> None: @@ -506,6 +526,8 @@ def _run_simulation( is_app, enable_tf_gpu_growth, run, + exit_event, + delay_start, flwr_dir, client_app, client_app_attr, @@ -593,6 +615,13 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: "Read more about how `tf.config.experimental.set_memory_growth()` works in " "the TensorFlow documentation: https://www.tensorflow.org/api/stable.", ) + parser.add_argument( + "--delay-start", + type=int, + default=3, + help="Buffer time (in seconds) to delay the start the simulation engine after " + "the `ServerApp`, which runs in a separate thread, has been launched.", + ) parser.add_argument( "--verbose", action="store_true", diff --git a/src/py/flwr/superexec/__init__.py b/src/py/flwr/superexec/__init__.py index a510c41f4182..0584ca663a02 100644 --- a/src/py/flwr/superexec/__init__.py +++ b/src/py/flwr/superexec/__init__.py @@ -13,9 +13,3 @@ # limitations under the License. # ============================================================================== """Flower SuperExec service.""" - -from .app import run_superexec as run_superexec - -__all__ = [ - "run_superexec", -] diff --git a/src/py/flwr/superexec/app.py b/src/py/flwr/superexec/app.py index 9510479ec8e1..c00aa0f88e7b 100644 --- a/src/py/flwr/superexec/app.py +++ b/src/py/flwr/superexec/app.py @@ -18,14 +18,14 @@ import sys from logging import INFO, WARN from pathlib import Path -from typing import Optional, Tuple +from typing import Optional import grpc from flwr.common import EventType, event, log from flwr.common.address import parse_address from flwr.common.config import parse_config_args -from flwr.common.constant import SUPEREXEC_DEFAULT_ADDRESS +from flwr.common.constant import EXEC_API_DEFAULT_ADDRESS from flwr.common.exit_handlers import register_exit_handlers from flwr.common.object_ref import load_app, validate @@ -56,7 +56,9 @@ def run_superexec() -> None: address=address, executor=_load_executor(args), certificates=certificates, - config=parse_config_args([args.executor_config]), + config=parse_config_args( + [args.executor_config] if args.executor_config else args.executor_config + ), ) grpc_servers = [superexec_server] @@ -79,7 +81,7 @@ def _parse_args_run_superexec() -> argparse.ArgumentParser: parser.add_argument( "--address", help="SuperExec (gRPC) server address (IPv4, IPv6, or a domain name)", - default=SUPEREXEC_DEFAULT_ADDRESS, + default=EXEC_API_DEFAULT_ADDRESS, ) parser.add_argument( "--executor", @@ -128,7 +130,7 @@ def _parse_args_run_superexec() -> argparse.ArgumentParser: def _try_obtain_certificates( args: argparse.Namespace, -) -> Optional[Tuple[bytes, bytes, bytes]]: +) -> Optional[tuple[bytes, bytes, bytes]]: # Obtain certificates if args.insecure: log(WARN, "Option `--insecure` was set. 
Starting insecure HTTP server.") diff --git a/src/py/flwr/superexec/deployment.py b/src/py/flwr/superexec/deployment.py index 2354e047a1ec..331fd817228e 100644 --- a/src/py/flwr/superexec/deployment.py +++ b/src/py/flwr/superexec/deployment.py @@ -23,13 +23,13 @@ from typing_extensions import override from flwr.cli.install import install_from_fab +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS from flwr.common.grpc import create_channel from flwr.common.logger import log from flwr.common.serde import fab_to_proto, user_config_to_proto from flwr.common.typing import Fab, UserConfig -from flwr.proto.driver_pb2 import CreateRunRequest # pylint: disable=E0611 from flwr.proto.driver_pb2_grpc import DriverStub -from flwr.server.driver.grpc_driver import DEFAULT_SERVER_ADDRESS_DRIVER +from flwr.proto.run_pb2 import CreateRunRequest # pylint: disable=E0611 from .executor import Executor, RunTracker @@ -50,7 +50,7 @@ class DeploymentEngine(Executor): def __init__( self, - superlink: str = DEFAULT_SERVER_ADDRESS_DRIVER, + superlink: str = DRIVER_API_DEFAULT_ADDRESS, root_certificates: Optional[str] = None, flwr_dir: Optional[str] = None, ) -> None: @@ -167,6 +167,8 @@ def start_run( # Execute the command proc = subprocess.Popen( # pylint: disable=consider-using-with command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True, ) log(INFO, "Started run %s", str(run_id)) diff --git a/src/py/flwr/superexec/exec_grpc.py b/src/py/flwr/superexec/exec_grpc.py index a32ebc1b3e35..017395bc8002 100644 --- a/src/py/flwr/superexec/exec_grpc.py +++ b/src/py/flwr/superexec/exec_grpc.py @@ -15,7 +15,7 @@ """SuperExec gRPC API.""" from logging import INFO -from typing import Optional, Tuple +from typing import Optional import grpc @@ -32,7 +32,7 @@ def run_superexec_api_grpc( address: str, executor: Executor, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], config: UserConfig, ) -> grpc.Server: """Run SuperExec API (gRPC, request-response).""" diff --git a/src/py/flwr/superexec/exec_servicer.py b/src/py/flwr/superexec/exec_servicer.py index dda3e96994de..ebb12b5ddbd2 100644 --- a/src/py/flwr/superexec/exec_servicer.py +++ b/src/py/flwr/superexec/exec_servicer.py @@ -15,8 +15,13 @@ """SuperExec API servicer.""" +import select +import sys +import threading +import time +from collections.abc import Generator from logging import ERROR, INFO -from typing import Any, Dict, Generator +from typing import Any import grpc @@ -32,13 +37,15 @@ from .executor import Executor, RunTracker +SELECT_TIMEOUT = 1 # Timeout for selecting ready-to-read file descriptors (in seconds) + class ExecServicer(exec_pb2_grpc.ExecServicer): """SuperExec API servicer.""" def __init__(self, executor: Executor) -> None: self.executor = executor - self.runs: Dict[int, RunTracker] = {} + self.runs: dict[int, RunTracker] = {} def StartRun( self, request: StartRunRequest, context: grpc.ServicerContext @@ -58,13 +65,72 @@ def StartRun( self.runs[run.run_id] = run + # Start a background thread to capture the log output + capture_thread = threading.Thread( + target=_capture_logs, args=(run,), daemon=True + ) + capture_thread.start() + return StartRunResponse(run_id=run.run_id) - def StreamLogs( + def StreamLogs( # pylint: disable=C0103 self, request: StreamLogsRequest, context: grpc.ServicerContext ) -> Generator[StreamLogsResponse, Any, None]: """Get logs.""" - logs = ["a", "b", "c"] + log(INFO, "ExecServicer.StreamLogs") + + # Exit if `run_id` not found + if request.run_id 
not in self.runs: + context.abort(grpc.StatusCode.NOT_FOUND, "Run ID not found") + + last_sent_index = 0 while context.is_active(): - for i in range(len(logs)): # pylint: disable=C0200 + # Yield n'th row of logs, if n'th row < len(logs) + logs = self.runs[request.run_id].logs + for i in range(last_sent_index, len(logs)): yield StreamLogsResponse(log_output=logs[i]) + last_sent_index = len(logs) + + # Wait for and continue to yield more log responses only if the + # run isn't completed yet. If the run is finished, the entire log + # is returned at this point and the server ends the stream. + if self.runs[request.run_id].proc.poll() is not None: + log(INFO, "All logs for run ID `%s` returned", request.run_id) + context.set_code(grpc.StatusCode.OK) + context.cancel() + + time.sleep(1.0) # Sleep briefly to avoid busy waiting + + +def _capture_logs( + run: RunTracker, +) -> None: + while True: + # Explicitly check if Popen.poll() is None. Required for `pytest`. + if run.proc.poll() is None: + # Select streams only when ready to read + ready_to_read, _, _ = select.select( + [run.proc.stdout, run.proc.stderr], + [], + [], + SELECT_TIMEOUT, + ) + # Read from std* and append to RunTracker.logs + for stream in ready_to_read: + # Flush stdout to view output in real time + readline = stream.readline() + sys.stdout.write(readline) + sys.stdout.flush() + # Append to logs + line = readline.rstrip() + if line: + run.logs.append(f"{line}") + + # Close std* to prevent blocking + elif run.proc.poll() is not None: + log(INFO, "Subprocess finished, exiting log capture") + if run.proc.stdout: + run.proc.stdout.close() + if run.proc.stderr: + run.proc.stderr.close() + break diff --git a/src/py/flwr/superexec/exec_servicer_test.py b/src/py/flwr/superexec/exec_servicer_test.py index 83717d63a36e..b777bc806fe5 100644 --- a/src/py/flwr/superexec/exec_servicer_test.py +++ b/src/py/flwr/superexec/exec_servicer_test.py @@ -16,11 +16,11 @@ import subprocess -from unittest.mock import MagicMock +from unittest.mock import MagicMock, Mock from flwr.proto.exec_pb2 import StartRunRequest # pylint: disable=E0611 -from .exec_servicer import ExecServicer +from .exec_servicer import ExecServicer, _capture_logs def test_start_run() -> None: @@ -50,3 +50,20 @@ def test_start_run() -> None: response = servicer.StartRun(request, context_mock) assert response.run_id == 10 + + +def test_capture_logs() -> None: + """Test capture_logs function.""" + run_res = Mock() + run_res.logs = [] + with subprocess.Popen( + ["echo", "success"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) as proc: + run_res.proc = proc + _capture_logs(run_res) + + assert len(run_res.logs) == 1 + assert run_res.logs[0] == "success" diff --git a/src/py/flwr/superexec/executor.py b/src/py/flwr/superexec/executor.py index 8d630d108b66..08b66a438e4d 100644 --- a/src/py/flwr/superexec/executor.py +++ b/src/py/flwr/superexec/executor.py @@ -15,7 +15,7 @@ """Execute and monitor a Flower run.""" from abc import ABC, abstractmethod -from dataclasses import dataclass +from dataclasses import dataclass, field from subprocess import Popen from typing import Optional @@ -28,6 +28,7 @@ class RunTracker: run_id: int proc: Popen # type: ignore + logs: list[str] = field(default_factory=list) class Executor(ABC): diff --git a/src/py/flwr_tool/protoc_test.py b/src/py/flwr_tool/protoc_test.py index 6f9127304f25..f0784a4498d2 100644 --- a/src/py/flwr_tool/protoc_test.py +++ b/src/py/flwr_tool/protoc_test.py @@ -28,4 +28,4 @@ def test_directories() -> None: def 
test_proto_file_count() -> None: """Test if the correct number of proto files were captured by the glob.""" - assert len(PROTO_FILES) == 13 + assert len(PROTO_FILES) == 14
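
Note on the new log-capture path: the `_capture_logs` helper added to `exec_servicer.py` above reads a subprocess's stdout/stderr through `select()` so the reader never blocks indefinitely, and `StreamLogs` then serves the buffered lines over gRPC. The standalone sketch below mirrors only that `select()`-based capture loop against a plain `subprocess.Popen`; it uses nothing beyond the Python standard library, the `collect_logs` helper and the `echo` command are illustrative stand-ins rather than Flower APIs, and it additionally drains any output still buffered after the process exits.

# Standalone sketch (not part of the patch above) of the select()-based capture
# loop used by `_capture_logs`. `collect_logs` is a hypothetical helper name.
# POSIX-only: select() works on pipe objects there, but not on Windows.
import select
import subprocess
import sys
from typing import IO

SELECT_TIMEOUT = 1  # seconds to wait for readable stdout/stderr


def collect_logs(proc: subprocess.Popen) -> list[str]:
    """Capture a subprocess's stdout/stderr without busy waiting."""
    logs: list[str] = []

    def _consume(stream: IO[str]) -> None:
        line = stream.readline()
        sys.stdout.write(line)  # echo in real time
        sys.stdout.flush()
        if line.rstrip():
            logs.append(line.rstrip())

    while proc.poll() is None:
        # Only touch streams that are ready to be read
        ready, _, _ = select.select([proc.stdout, proc.stderr], [], [], SELECT_TIMEOUT)
        for stream in ready:
            _consume(stream)

    # Process finished: drain whatever is still buffered, then close the pipes
    for stream in (proc.stdout, proc.stderr):
        if stream:
            for line in stream:
                if line.rstrip():
                    logs.append(line.rstrip())
            stream.close()
    return logs


if __name__ == "__main__":
    with subprocess.Popen(
        ["echo", "success"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    ) as p:
        assert collect_logs(p) == ["success"]

The finite `SELECT_TIMEOUT` is the key design choice: it lets the loop periodically re-check `poll()` instead of hanging on a quiet process, which is the same reason the servicer version passes `SELECT_TIMEOUT` to `select.select`.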